In [10]:
# Requirements
##Installs
def check_dependencies_already_installed() -> bool:
    """Report whether the extra notebook dependencies are present, by probing
    the import of gdown (the last package the install step adds)."""
    try:
        import gdown  # noqa: F401 - probe import only
    except ImportError:
        return False
    return True


def install_dependencies():
    # IPython shell magic (this file is a notebook export, not importable Python):
    # installs the extra packages the notebook needs beyond the base image.
    !pip install 'tensorflow_addons' 'tensorflow-determinism' 'gdown'


# Run pip only when the probe import failed, to skip a slow no-op install.
dependencies_already_installed = check_dependencies_already_installed()
print(f'dependencies_already_installed: {dependencies_already_installed}')
if not dependencies_already_installed:
    install_dependencies()

##Imports
import os
import random
import json
from enum import Enum
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.python.data.ops.dataset_ops import Dataset
from tensorflow.python.distribute.tpu_strategy import TPUStrategy
import tensorflow_addons as tfa
from skimage.metrics import structural_similarity
from scipy.stats import wasserstein_distance
import matplotlib.pyplot as plt
import numpy as np
from tqdm import tqdm
from PIL import Image
from pathlib import Path
from datetime import datetime
from zipfile import ZipFile


# Environment setup
##Set tensorflow deterministic mode
def set_tf_deterministic_mode():
    """Ask TensorFlow / cuDNN for deterministic op implementations via the
    environment variables they read at initialization time."""
    for env_var_name in ('TF_DETERMINISTIC_OPS', 'TF_CUDNN_DETERMINISTIC'):
        os.environ[env_var_name] = '1'


##Set random seed
def set_training_random_seed(seed: int):
    """Seed every RNG the training touches: python's `random`, numpy,
    TensorFlow, and python's string hashing."""
    os.environ['PYTHONHASHSEED'] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    tf.random.set_seed(seed)
    print(f'set_training_random_seed() - seed value: {seed}')


##Connect to strongest available device
# determinism env vars must be in place before any TF device initialization below
set_tf_deterministic_mode()


def choose_strongest_available_device_strategy():
    try:
        tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
        tf.config.experimental_connect_to_cluster(tpu)
        tf.tpu.experimental.initialize_tpu_system(tpu)
        selected_strategy = TPUStrategy(tpu)
    except:
        selected_strategy = tf.distribute.get_strategy()

    print(f"choose_strongest_available_device_strategy() - selected strategy type: {type(selected_strategy).__name__}")

    # todo itay - delete this section so it won't mess up in google colab
    gpu_is_available = any(tf.config.list_physical_devices('GPU'))
    if gpu_is_available:
        !nvidia-smi

    print(f'\n\n*** running_on_tpu - {isinstance(selected_strategy, TPUStrategy)} ***\n\n')
    return selected_strategy



# Selected once at import time; used below to decide local vs GCS data loading.
DEVICE_STRATEGY = choose_strongest_available_device_strategy()

# todo itay - delete
# hard pin of the TF version this notebook was developed against
assert tf.__version__ == '2.6.4'

##Download competition dataset
# local target folder the competition zip is extracted into
LOCAL_DATASET_FOLDER_PATH = Path('./train_data/gan-getting-started/')


def download_competition_dataset_if_not_present():
    """Download and unzip the competition dataset into
    LOCAL_DATASET_FOLDER_PATH, unless that folder already exists."""
    dataset_already_downloaded = LOCAL_DATASET_FOLDER_PATH.exists()
    print(f"dataset_already_downloaded: {dataset_already_downloaded}")
    if not dataset_already_downloaded:
        # note - this is the untouched competition dataset. just uploaded it to the drive so it'll be
        # available via colab as well.
        # IPython shell magics (notebook export); gdown fetches by Google Drive file id.
        !gdown '1ZwcoO11NKhYsbuM7hzdSzKjGOnOx6X94'
        !mkdir -p {LOCAL_DATASET_FOLDER_PATH}
        !unzip -o -q ./gan-getting-started.zip -d {LOCAL_DATASET_FOLDER_PATH}


# Fetch the dataset up-front so later cells can assume it exists locally.
download_competition_dataset_if_not_present()

#Choose 30 monet train images
class TrainImagesSelectionMethod(Enum):
    """Strategies for picking the 30 monet training images from the full set."""
    RandomSelection = 'random_selection'
    FarthestImagesByPixelDistance = 'farthest_images_by_pixel_distance'
    ClosestImagesByPixelDistance = 'closest_images_by_pixel_distance'
    FarthestImagesByStructuralDistance = 'farthest_images_by_structural_distance'
    ClosestImagesByStructuralDistance = 'closest_images_by_structural_distance'
    FarthestImagesByEarthMoversDistance = 'farthest_images_by_earth_movers_distance'
    ClosestImagesByEarthMoversDistance = 'closest_images_by_earth_movers_distance'


def _choose_30_monet_train_images(
        original_ordered_monet_dataset: Dataset, method: TrainImagesSelectionMethod,
        experiment_random_seed: int, use_preprocessed_cache: bool
) -> Dataset:
    """Pick 30 monet images from the ordered dataset with the given selection
    method and return them as a new Dataset.

    The selection always runs under a fixed seed (42) so the chosen indices
    are reproducible across experiments; the experiment's own seed is restored
    in the finally block. With use_preprocessed_cache=True, precomputed index
    lists are used instead of re-running the (expensive) selection.
    """
    set_training_random_seed(seed=42)

    try:
        original_ordered_monet_images = list(original_ordered_monet_dataset)
        if use_preprocessed_cache:
            # indices previously produced by the selection methods below,
            # inlined so cached runs need no extra files
            preprocessed_indices_cache = {
                TrainImagesSelectionMethod.RandomSelection: [203, 266, 152, 9, 233, 226, 196, 109, 5, 175, 237, 57, 218, 45, 182, 221, 289, 211, 148, 165, 78, 113, 249, 250, 104, 42, 281, 295, 157, 238],
                TrainImagesSelectionMethod.FarthestImagesByPixelDistance: [57, 299, 113, 74, 160, 193, 139, 283, 93, 56, 90, 196, 108, 34, 45, 40, 249, 87, 240, 106, 218, 208, 2, 289, 16, 11, 155, 52, 292, 272],
                TrainImagesSelectionMethod.ClosestImagesByPixelDistance: [57, 119, 185, 203, 70, 216, 238, 61, 273, 169, 81, 222, 68, 137, 58, 67, 295, 164, 50, 217, 179, 190, 125, 290, 44, 251, 274, 195, 7, 165],
                TrainImagesSelectionMethod.FarthestImagesByStructuralDistance: [57, 134, 16, 152, 133, 116, 94, 147, 177, 161, 40, 9, 218, 110, 70, 101, 75, 54, 243, 100, 98, 237, 77, 115, 106, 119, 45, 261, 241, 90],
                TrainImagesSelectionMethod.ClosestImagesByStructuralDistance: [57, 254, 238, 160, 86, 295, 187, 51, 41, 81, 273, 120, 230, 148, 25, 13, 264, 114, 236, 155, 58, 59, 151, 91, 107, 207, 289, 203, 176, 125],
                TrainImagesSelectionMethod.FarthestImagesByEarthMoversDistance: [57, 168, 144, 249, 145, 117, 288, 109, 48, 207, 252, 251, 186, 278, 296, 39, 263, 234, 167, 165, 128, 41, 290, 176, 33, 107, 202, 201, 282, 127],
                TrainImagesSelectionMethod.ClosestImagesByEarthMoversDistance: [57, 196, 13, 131, 148, 97, 187, 74, 11, 162, 147, 240, 55, 70, 102, 76, 213, 139, 256, 253, 247, 17, 227, 108, 86, 133, 19, 77, 42, 1],
            }
            assert method in preprocessed_indices_cache, f"unknown method - '{method}'"
            chosen_30_images_indices = preprocessed_indices_cache[method]
        elif method is TrainImagesSelectionMethod.RandomSelection:
            chosen_30_images_indices = _pick_random_images(
                original_ordered_monet_images, images_count=30
            )
        else:
            # every remaining method is the same farthest/closest search,
            # differing only in distance function and direction
            distance_method_configs = {
                TrainImagesSelectionMethod.FarthestImagesByPixelDistance: (_images_pixel_distance, False),
                TrainImagesSelectionMethod.ClosestImagesByPixelDistance: (_images_pixel_distance, True),
                TrainImagesSelectionMethod.FarthestImagesByStructuralDistance: (_images_structural_distance, False),
                TrainImagesSelectionMethod.ClosestImagesByStructuralDistance: (_images_structural_distance, True),
                TrainImagesSelectionMethod.FarthestImagesByEarthMoversDistance: (_earth_movers_distance, False),
                TrainImagesSelectionMethod.ClosestImagesByEarthMoversDistance: (_earth_movers_distance, True),
            }
            if method not in distance_method_configs:
                raise NotImplementedError(f"unknown method - '{method}'")
            distance_func, reverse_distance = distance_method_configs[method]
            chosen_30_images_indices = _pick_images_farthest_from_each_other(
                original_ordered_monet_images,
                distance_func=distance_func,
                images_count=30,
                reverse_distance=reverse_distance
            )

        chosen_30_images_dataset = Dataset.from_tensor_slices([
            original_ordered_monet_images[image_idx]
            for image_idx in chosen_30_images_indices
        ])
        _plot_chosen_30_images(chosen_30_images_dataset)
    finally:
        # restore the caller's seed even if selection failed
        set_training_random_seed(experiment_random_seed)
    return chosen_30_images_dataset


def _plot_chosen_30_images(chosen_30_images_dataset):
    """Render the 30 selected monet training images in one tall figure."""
    try:
        first_image_shape = list(chosen_30_images_dataset)[0].shape
        print(f'*** Selected 30 train monet photos (shape: {first_image_shape}) ***')
        _, axes = plt.subplots(30, 1, figsize=(50, 50))
        for plot_idx, image_tensor in enumerate(chosen_30_images_dataset):
            # undo the [-1, 1] training normalization back to displayable uint8
            displayable = (image_tensor * 127.5 + 127.5).numpy()[0].astype(np.uint8)
            axes[plot_idx].imshow(displayable)
        plt.show()
    finally:
        # always release the figure, even if plotting raised
        plt.close()


# Load competition dataset
##Load full dataset
def find_competition_dataset_files(local_dataset_folder_path: Path):
    """Resolve the lists of monet and photo .tfrec files.

    On TPU the data is read from the Kaggle GCS mirror (TPUs stream from GCS);
    otherwise the locally-downloaded dataset folder is globbed.
    """
    running_on_tpu = isinstance(DEVICE_STRATEGY, TPUStrategy)
    if running_on_tpu:
        from kaggle_datasets import KaggleDatasets
        dataset_root = Path(KaggleDatasets().get_gcs_path())
    else:
        dataset_root = local_dataset_folder_path

    def _glob_tfrec_files(subfolder_name: str):
        return tf.io.gfile.glob(str(dataset_root / f'{subfolder_name}/*.tfrec'))

    monet_dataset_files = _glob_tfrec_files('monet_tfrec')
    photo_dataset_files = _glob_tfrec_files('photo_tfrec')
    assert any(monet_dataset_files)
    assert any(photo_dataset_files)
    print(f"found {len(monet_dataset_files)} monet and {len(photo_dataset_files)} photo tfrec files.")

    return monet_dataset_files, photo_dataset_files


def _prepare_image_tensor_for_training(image):
    """Normalize a decoded 256x256 RGB image to [-1, 1] floats and upscale it
    to the 320x320 resolution the networks expect."""
    normalized = (tf.cast(image, tf.float32) / 127.5) - 1
    shaped = tf.reshape(normalized, [256, 256, 3])
    return tf.image.resize(shaped, (320, 320), method='bilinear')


def load_tf_records_dataset(tf_record_files) -> Dataset:
    """Read a set of .tfrec files into a dataset of training-ready image tensors."""
    record_feature_spec = {
        "image_name": tf.io.FixedLenFeature([], tf.string),
        "image": tf.io.FixedLenFeature([], tf.string),
        "target": tf.io.FixedLenFeature([], tf.string)
    }

    def _decode_record(serialized_record):
        parsed = tf.io.parse_single_example(serialized_record, record_feature_spec)
        decoded_image = tf.image.decode_jpeg(parsed['image'], channels=3)
        return _prepare_image_tensor_for_training(decoded_image)

    # sort the file list for a deterministic read order across runs
    dataset = tf.data.TFRecordDataset(sorted(tf_record_files))
    return dataset.map(_decode_record, num_parallel_calls=tf.data.experimental.AUTOTUNE)


##Pick 30 train monet images strategies
def _pick_images_farthest_from_each_other(
        original_ordered_monet_images: list, distance_func, images_count: int, reverse_distance: bool = False
) -> list:
    """Select `images_count` image indices via a farthest-first search.

    With reverse_distance=True the distance is negated, turning the search
    into a closest-first selection.
    """
    comparison_images_resize_shape = (100, 100)

    def _to_comparison_array(image_tensor):
        # denormalize from [-1, 1] back to 0..255 and shrink for cheaper comparisons
        denormalized = image_tensor.numpy() * 127.5 + 127.5
        shrunk = tf.image.resize(denormalized, comparison_images_resize_shape).numpy()
        return shrunk.astype(np.uint8)[0]

    if reverse_distance:
        def effective_distance(image1, image2):
            return -distance_func(image1, image2)
    else:
        effective_distance = distance_func

    return _incremental_farthest_search(
        original_ordered_monet_images,
        k=images_count,
        distance_func=effective_distance,
        pre_comparison_transformation_func=_to_comparison_array
    )


def _pick_random_images(original_ordered_monet_images: list, images_count: int) -> list:
    chosen_30_images_indices = list(np.random.choice(
        list(range(len(original_ordered_monet_images))), size=images_count, replace=False
    ))
    return chosen_30_images_indices


def _incremental_farthest_search(
        ordered_image_tensors_list, k: int, distance_func, pre_comparison_transformation_func
):
    """Greedy farthest-first selection of k images.

    Starts from a random image, then repeatedly adds the candidate whose
    distance to its *nearest* already-chosen image is largest. Returns the
    original indices of the chosen images.

    The original implementation first built a distances list against the
    first chosen image and then re-minimized against every chosen image
    (recomputing the first distance again); here each candidate's
    nearest-chosen distance is computed once per round - same values, same
    tie-breaking, roughly half the distance_func calls.
    """
    remaining_images = [
        dict(
            orig_image_index=orig_image_index,
            img_tensor=img_tensor,
            img_comparison_array=pre_comparison_transformation_func(img_tensor)
        )
        for orig_image_index, img_tensor in enumerate(ordered_image_tensors_list)
    ]

    # random starting point (reproducible under the caller's fixed seed)
    chosen_images = [remaining_images.pop(random.randint(0, len(remaining_images) - 1))]
    for _ in tqdm(list(range(k - 1)), desc='incremental_farthest_search() main loop'):
        # distance of each candidate to its nearest already-chosen image
        distances = [
            min(
                distance_func(candidate['img_comparison_array'], chosen['img_comparison_array'])
                for chosen in chosen_images
            )
            for candidate in remaining_images
        ]
        # move the candidate with the largest nearest-chosen distance
        chosen_images.append(remaining_images.pop(distances.index(max(distances))))
    return [chosen['orig_image_index'] for chosen in chosen_images]


def _images_pixel_distance(image1: np.array, image2: np.array) -> float:
    distance = np.sum((image1.flatten() - image2.flatten()) ** 2)
    return distance


def _images_structural_distance(image1: np.array, image2: np.array) -> float:
    """Negated SSIM between the two images, so a larger return value means
    less similar (a distance-like quantity).

    NOTE(review): the images are flattened before the SSIM call, so the
    comparison runs over 1-D windows rather than 2-D spatial windows - confirm
    this is intentional (the cached selection indices were produced this way,
    so changing it would invalidate the cache).
    """
    similarity_index, *_ = structural_similarity(image1.flatten(), image2.flatten(), full=True)
    return -similarity_index


def _earth_movers_distance(image1: np.array, image2: np.array) -> float:
    """Wasserstein distance computed from the images' greyscale histograms.

    NOTE(review): the histograms are passed as the *value samples*
    (u_values/v_values), not as weights over the 0..255 intensity bins, so
    scipy measures distance between the distributions of bin frequencies
    rather than between the intensity distributions. Confirm this is intended
    (the cached selection indices were produced this way, so changing it would
    invalidate the cache).
    """
    image1_hist = _calc_image_greyscale_histogram(image1)
    image2_hist = _calc_image_greyscale_histogram(image2)
    distance = wasserstein_distance(image1_hist, image2_hist)
    return distance


def _calc_image_greyscale_histogram(image: np.array) -> np.array:
    """Return the normalized 256-bin greyscale intensity histogram of `image`
    (fractions summing to 1), using PIL's RGB->L conversion."""
    greyscale_image = np.array(Image.fromarray(image).convert('L'))
    # np.bincount counts all 256 intensity values in one C-level pass,
    # replacing the original O(h*w) Python double loop; output is identical.
    hist = np.bincount(greyscale_image.ravel(), minlength=256).astype(np.float64)
    return hist / greyscale_image.size


def down_sample(filters, size, strides=2, padding='same'):
    """Encoder block: Conv2D (no bias, normal-initialized) followed by
    LeakyReLU; halves spatial dims at the default stride of 2."""
    conv = layers.Conv2D(
        filters, size, strides=strides, padding=padding,
        kernel_initializer=tf.random_normal_initializer(0., 0.02), use_bias=False
    )
    return keras.Sequential([conv, layers.LeakyReLU()])


def up_sample(filters, size, strides=2, padding='same', apply_dropout=False):
    """Decoder block: Conv2DTranspose (no bias, normal-initialized), optional
    0.5 dropout, finished by ReLU; doubles spatial dims at the default stride."""
    block_layers = [
        layers.Conv2DTranspose(
            filters, size, strides=strides, padding=padding, use_bias=False,
            kernel_initializer=tf.random_normal_initializer(0., 0.02)
        )
    ]
    if apply_dropout:
        block_layers.append(layers.Dropout(0.5))
    block_layers.append(layers.ReLU())
    return keras.Sequential(block_layers)


class GeneratorNetworkStructure(Enum):
    """Generator encoder/decoder size variants used across experiments."""
    Baseline = 'baseline'
    Thin = 'thin'
    Wide = 'wide'
    Deep = 'deep'


def build_generator_model(generator_network_structure: GeneratorNetworkStructure):
    """Build a U-Net style generator: encoder/decoder stacks with skip
    connections between mirrored levels, for 320x320x3 inputs and outputs."""
    down_stack, up_stack = _build_generator_encoder_decoder_layout(generator_network_structure)
    inputs = layers.Input(shape=[320, 320, 3])

    # Encoder pass - keep every downsampled activation so it can be fed to the
    # matching decoder level as a skip connection.
    layers_to_direct_to_previous = []
    x = inputs
    for down in down_stack:
        x = down(x)
        layers_to_direct_to_previous.append(x)

    # Decoder pass - skips are consumed deepest-first; the last encoder output
    # (the bottleneck) is excluded because it is the decoder's own input.
    for up, skip in zip(up_stack, reversed(layers_to_direct_to_previous[:-1])):
        x = up(x)
        x = layers.Concatenate()([x, skip])

    initializer = tf.random_normal_initializer(0.0, 0.02)
    # tanh maps the output back into the [-1, 1] range the training data uses
    last_layer = layers.Conv2DTranspose(3, 4,
                                  strides=2,
                                  padding='same',
                                  kernel_initializer=initializer,
                                  activation='tanh')  # (bs, 320, 320, 3)
    generator_model = keras.Model(inputs=inputs, outputs=last_layer(x))
    return generator_model


def _build_generator_encoder_decoder_layout(network_structure: GeneratorNetworkStructure):
    """Return the (down_stack, up_stack) layer lists for the requested
    generator variant.

    All variants share the same depth-of-resolution schedule (320 -> 1 -> 320
    for Baseline/Thin/Wide; Deep interleaves extra stride-1 blocks) and differ
    in channel widths / number of blocks. The shape comments assume the
    320x320x3 input from build_generator_model.

    Raises:
        NotImplementedError: for an unrecognized network_structure.
    """
    # todo itay delete dimensions comments

    if network_structure is GeneratorNetworkStructure.Baseline:
        # bs = batch size
        down_stack = [
            down_sample(64, 4),  # (bs, 160, 160, 64)
            down_sample(128, 4),  # (bs, 80, 80, 128)
            down_sample(256, 4),  # (bs, 40, 40, 256)
            down_sample(512, 4),  # (bs, 20, 20, 512)
            down_sample(512, 4),  # (bs, 10, 10, 512)
            down_sample(512, 4),  # (bs, 5, 5, 512)
            down_sample(512, 4, strides=1, padding='valid'),  # (bs, 2, 2, 512)
            down_sample(512, 4),  # (bs, 1, 1, 512)
        ]
        up_stack = [
            up_sample(512, 4, apply_dropout=True),  # (bs, 2, 2, 1024)
            up_sample(512, 4, strides=1, padding='valid', apply_dropout=True),  # (bs, 5, 5, 1024)
            up_sample(512, 4, apply_dropout=True),  # (bs, 10, 10, 1024)
            up_sample(512, 4),  # (bs, 20, 20, 1024)
            up_sample(256, 4),  # (bs, 40, 40, 512)
            up_sample(128, 4),  # (bs, 80, 80, 256)
            up_sample(64, 4),  # (bs, 160, 160, 128)
        ]
    elif network_structure is GeneratorNetworkStructure.Thin:
        # bs = batch size
        down_stack = [
            down_sample(64, 4),  # (bs, 160, 160, 64)
            down_sample(128, 4),  # (bs, 80, 80, 128)
            down_sample(128, 4),  # (bs, 40, 40, 128)
            down_sample(256, 4),  # (bs, 20, 20, 256)
            down_sample(256, 4),  # (bs, 10, 10, 256)
            down_sample(256, 4),  # (bs, 5, 5, 256)
            down_sample(512, 4, strides=1, padding='valid'),  # (bs, 2, 2, 512)
            down_sample(512, 4),  # (bs, 1, 1, 512)
        ]
        up_stack = [
            up_sample(512, 4, apply_dropout=True),  # (bs, 2, 2, 1024)
            up_sample(512, 4, strides=1, padding='valid', apply_dropout=True),  # (bs, 5, 5, 1024)
            up_sample(256, 4, apply_dropout=True),  # (bs, 10, 10, 512)
            up_sample(256, 4),  # (bs, 20, 20, 512)
            up_sample(128, 4),  # (bs, 40, 40, 256)
            up_sample(128, 4),  # (bs, 80, 80, 256)
            up_sample(64, 4),  # (bs, 160, 160, 128)
        ]
    elif network_structure is GeneratorNetworkStructure.Wide:
        # bs = batch size
        down_stack = [
            down_sample(64, 4),  # (bs, 160, 160, 64)
            down_sample(256, 4),  # (bs, 80, 80, 256)
            down_sample(512, 4),  # (bs, 40, 40, 512)
            down_sample(1_024, 4),  # (bs, 20, 20, 1_024)
            down_sample(1_024, 4),  # (bs, 10, 10, 1_024)
            down_sample(1_024, 4),  # (bs, 5, 5, 1_024)
            down_sample(1_024, 4, strides=1, padding='valid'),  # (bs, 2, 2, 1_024)
            down_sample(512, 4),  # (bs, 1, 1, 512)
        ]
        up_stack = [
            up_sample(1_024, 4, apply_dropout=True),  # (bs, 2, 2, 2048)
            up_sample(1_024, 4, strides=1, padding='valid', apply_dropout=True),  # (bs, 5, 5, 2048)
            up_sample(1_024, 4, apply_dropout=True),  # (bs, 10, 10, 2048)
            up_sample(1_024, 4),  # (bs, 20, 20, 2048)
            up_sample(512, 4),  # (bs, 40, 40, 1024)
            up_sample(256, 4),  # (bs, 80, 80, 512)
            up_sample(64, 4),  # (bs, 160, 160, 128)
        ]
    elif network_structure is GeneratorNetworkStructure.Deep:
        # bs = batch size
        down_stack = [
            down_sample(64, 4),  # (bs, 160, 160, 64)
            down_sample(128, 4),  # (bs, 80, 80, 128)
            down_sample(128, 4, strides=1, padding='same'),  # (bs, 80, 80, 128)
            down_sample(256, 4),  # (bs, 40, 40, 256)
            down_sample(256, 4, strides=1, padding='same'),  # (bs, 40, 40, 256)
            down_sample(512, 4),  # (bs, 20, 20, 512)
            down_sample(512, 4, strides=1, padding='same'),  # (bs, 20, 20, 512)
            down_sample(512, 4),  # (bs, 10, 10, 512)
            down_sample(512, 4, strides=1, padding='same'),  # (bs, 10, 10, 512)
            down_sample(512, 4),  # (bs, 5, 5, 512)
            down_sample(512, 4, strides=1, padding='valid'),  # (bs, 2, 2, 512)
            down_sample(512, 4),  # (bs, 1, 1, 512)
        ]
        up_stack = [
            up_sample(512, 4, apply_dropout=True),  # (bs, 2, 2, 1024)
            up_sample(512, 4, strides=1, padding='valid', apply_dropout=True),  # (bs, 5, 5, 1024)
            up_sample(512, 4, apply_dropout=True),  # (bs, 10, 10, 1024)
            up_sample(512, 4, strides=1, padding='same'),  # (bs, 10, 10, 1024)
            up_sample(512, 4),  # (bs, 20, 20, 1024)
            up_sample(512, 4, strides=1, padding='same'),  # (bs, 20, 20, 1024)
            up_sample(256, 4),  # (bs, 40, 40, 512)
            up_sample(256, 4, strides=1, padding='same'),  # (bs, 40, 40, 512)
            up_sample(128, 4),  # (bs, 80, 80, 256)
            up_sample(128, 4, strides=1, padding='same'),  # (bs, 80, 80, 256)
            up_sample(64, 4),  # (bs, 160, 160, 128)
        ]
    else:
        raise NotImplementedError(f"unknown network structure - '{network_structure}'")
    return down_stack, up_stack


"""# Build the discriminator

The discriminator takes in the input image and classifies it as real or fake (generated). Instead of outputing a single node, the discriminator outputs a smaller 2D image with higher pixel values indicating a real classification and lower values indicating a fake classification.
"""


def build_discriminator_model():
    """PatchGAN-style discriminator for 320x320x3 inputs: four downsampling
    blocks, then two zero-padded convolutions producing a 30x30x1 patch map of
    real/fake logits."""
    initializer = tf.random_normal_initializer(0., 0.02)
    inp = layers.Input(shape=[320, 320, 3], name='input_image')

    # downsampling trunk: 320 -> 160 -> 80 -> 40 -> 20 spatial resolution
    x = down_sample(64, 5)(inp)
    x = down_sample(128, 4)(x)
    x = down_sample(256, 3)(x)
    x = down_sample(256, 2)(x)

    # zero-pad / conv head that ends in the 30x30 patch of logits
    x = layers.ZeroPadding2D()(x)
    x = layers.Conv2D(512, 4, strides=1,
                      kernel_initializer=initializer,
                      use_bias=False)(x)
    x = layers.LeakyReLU()(x)
    x = layers.ZeroPadding2D()(x)
    patch_logits = layers.Conv2D(1, 4, strides=1,
                                 kernel_initializer=initializer)(x)

    return tf.keras.Model(inputs=inp, outputs=patch_logits)


class CycleGan(keras.Model):
    """CycleGAN: two generator/discriminator pairs trained jointly with
    adversarial, cycle-consistency and identity losses.

    m_* networks operate on the monet domain (m_gen maps photo -> monet),
    p_* networks on the photo domain (p_gen maps monet -> photo).
    """

    def __init__(
            self,
            monet_generator,
            photo_generator,
            monet_discriminator,
            photo_discriminator,
            lambda_cycle=10,
    ):
        # lambda_cycle weights the cycle-consistency and identity terms
        # relative to the adversarial losses.
        super(CycleGan, self).__init__()
        self.m_gen = monet_generator
        self.p_gen = photo_generator
        self.m_disc = monet_discriminator
        self.p_disc = photo_discriminator
        self.lambda_cycle = lambda_cycle

    def compile(
            self,
            m_gen_optimizer,
            p_gen_optimizer,
            m_disc_optimizer,
            p_disc_optimizer,
            gen_loss_fn,
            disc_loss_fn,
            cycle_loss_fn,
            identity_loss_fn
    ):
        """Attach one optimizer per network plus the four loss functions.

        Also initializes _my_hack_history, which accumulates the per-step loss
        dict returned by train_step().
        """
        super(CycleGan, self).compile()
        self.m_gen_optimizer = m_gen_optimizer
        self.p_gen_optimizer = p_gen_optimizer
        self.m_disc_optimizer = m_disc_optimizer
        self.p_disc_optimizer = p_disc_optimizer
        self.gen_loss_fn = gen_loss_fn
        self.disc_loss_fn = disc_loss_fn
        self.cycle_loss_fn = cycle_loss_fn
        self.identity_loss_fn = identity_loss_fn
        self._my_hack_history = []

    def train_step(self, batch_data):
        """One optimization step over a (monet, photo) batch pair.

        Returns a dict with the total generator losses and the discriminator
        losses; the same dict is appended to _my_hack_history.
        """
        real_monet, real_photo = batch_data

        # persistent=True because gradient() is called four times on this tape
        # (once per network) below.
        with tf.GradientTape(persistent=True) as tape:
            # photo to monet back to photo
            fake_monet = self.m_gen(real_photo, training=True)
            cycled_photo = self.p_gen(fake_monet, training=True)

            # monet to photo back to monet
            fake_photo = self.p_gen(real_monet, training=True)
            cycled_monet = self.m_gen(fake_photo, training=True)

            # generating itself
            same_monet = self.m_gen(real_monet, training=True)
            same_photo = self.p_gen(real_photo, training=True)

            # discriminator used to check, inputing real images
            disc_real_monet = self.m_disc(real_monet, training=True)
            disc_real_photo = self.p_disc(real_photo, training=True)

            # discriminator used to check, inputing fake images
            disc_fake_monet = self.m_disc(fake_monet, training=True)
            disc_fake_photo = self.p_disc(fake_photo, training=True)

            # evaluates generator loss
            monet_gen_loss = self.gen_loss_fn(disc_fake_monet)
            photo_gen_loss = self.gen_loss_fn(disc_fake_photo)

            # evaluates total cycle consistency loss
            total_cycle_loss = self.cycle_loss_fn(real_monet, cycled_monet, self.lambda_cycle) + self.cycle_loss_fn(
                real_photo, cycled_photo, self.lambda_cycle)

            # evaluates total generator loss
            total_monet_gen_loss = monet_gen_loss + total_cycle_loss + self.identity_loss_fn(real_monet, same_monet,
                                                                                             self.lambda_cycle)
            total_photo_gen_loss = photo_gen_loss + total_cycle_loss + self.identity_loss_fn(real_photo, same_photo,
                                                                                             self.lambda_cycle)

            # evaluates discriminator loss
            monet_disc_loss = self.disc_loss_fn(disc_real_monet, disc_fake_monet)
            photo_disc_loss = self.disc_loss_fn(disc_real_photo, disc_fake_photo)

        # Calculate the gradients for generator and discriminator
        monet_generator_gradients = tape.gradient(total_monet_gen_loss,
                                                  self.m_gen.trainable_variables)
        photo_generator_gradients = tape.gradient(total_photo_gen_loss,
                                                  self.p_gen.trainable_variables)

        monet_discriminator_gradients = tape.gradient(monet_disc_loss,
                                                      self.m_disc.trainable_variables)
        photo_discriminator_gradients = tape.gradient(photo_disc_loss,
                                                      self.p_disc.trainable_variables)

        # Apply the gradients to the optimizer
        self.m_gen_optimizer.apply_gradients(zip(monet_generator_gradients,
                                                 self.m_gen.trainable_variables))

        self.p_gen_optimizer.apply_gradients(zip(photo_generator_gradients,
                                                 self.p_gen.trainable_variables))

        self.m_disc_optimizer.apply_gradients(zip(monet_discriminator_gradients,
                                                  self.m_disc.trainable_variables))

        self.p_disc_optimizer.apply_gradients(zip(photo_discriminator_gradients,
                                                  self.p_disc.trainable_variables))

        rett = {
            "monet_gen_loss": total_monet_gen_loss,
            "photo_gen_loss": total_photo_gen_loss,
            "monet_disc_loss": monet_disc_loss,
            "photo_disc_loss": photo_disc_loss
        }
        self._my_hack_history.append(rett)
        return rett


def _build_losses():
    """Build the four CycleGAN loss functions.

    Returns (identity_loss, generators_loss, discriminators_loss,
    final_cycle_loss). The adversarial losses use from-logits BCE with
    Reduction.NONE (per-element losses, as the patch discriminator emits a
    logit map).
    """
    def _patch_bce():
        # fresh per-call instance, matching the original construction pattern
        return tf.keras.losses.BinaryCrossentropy(
            from_logits=True,
            reduction=tf.keras.losses.Reduction.NONE
        )

    def identity_loss(real_image, same_image, lambda_):
        # penalize a generator for altering an image already in its domain
        return lambda_ * 0.5 * tf.reduce_mean(tf.abs(real_image - same_image))

    def generators_loss(generated):
        # generator wants the discriminator to output "real" (ones) for fakes
        return _patch_bce()(tf.ones_like(generated), generated)

    def discriminators_loss(real, generated):
        real_loss = _patch_bce()(tf.ones_like(real), real)
        generated_loss = _patch_bce()(tf.zeros_like(generated), generated)
        return (real_loss + generated_loss) * 0.5

    def final_cycle_loss(real_image, cycled_image, lambda_):
        # L1 between an image and its round-trip translation
        return lambda_ * tf.reduce_mean(tf.abs(real_image - cycled_image))

    return identity_loss, generators_loss, discriminators_loss, final_cycle_loss


def _build_cycle_gan_model(train_settings):
    """Assemble and compile a CycleGan from the experiment's train settings.

    Returns the compiled model together with the monet generator (the network
    later used to produce submission images). Network creation order is kept
    identical to the original so seeded weight initialization is reproducible.
    """
    structure = train_settings['generator_network_structure']
    monet_generator = build_generator_model(structure)
    photo_generator = build_generator_model(structure)
    monet_discriminator = build_discriminator_model()
    photo_discriminator = build_discriminator_model()

    identity_loss, generators_loss, discriminators_loss, final_cycle_loss = _build_losses()

    # one independent optimizer per network
    build_optimizer = train_settings['optimizer_builder']
    monet_gen_opt = build_optimizer()
    photo_gen_opt = build_optimizer()
    monet_disc_opt = build_optimizer()
    photo_disc_opt = build_optimizer()

    cycle_gan_model = CycleGan(
        monet_generator, photo_generator, monet_discriminator, photo_discriminator
    )
    cycle_gan_model.compile(
        m_gen_optimizer=monet_gen_opt,
        p_gen_optimizer=photo_gen_opt,
        m_disc_optimizer=monet_disc_opt,
        p_disc_optimizer=photo_disc_opt,
        gen_loss_fn=generators_loss,
        disc_loss_fn=discriminators_loss,
        cycle_loss_fn=final_cycle_loss,
        identity_loss_fn=identity_loss
    )
    return cycle_gan_model, monet_generator


#Plotting utils
def plot_cycle_gan_train_losses(train_history):
    """Plot the per-epoch mean of each tracked loss (generators on top,
    discriminators below) and print the final-epoch values."""
    # collapse each loss's per-step values into one mean value per epoch
    epoch_level_train_history = {}
    for loss_name, epochs_losses in train_history.history.items():
        epoch_level_train_history[loss_name] = {
            epoch: epoch_losses.flatten().mean()
            for epoch, epoch_losses in enumerate(epochs_losses)
        }

    panel_specs = (
        ('Generators', ('monet_gen_loss', 'photo_gen_loss')),
        ('Discriminators', ('monet_disc_loss', 'photo_disc_loss')),
    )
    try:
        fig, axes = plt.subplots(2, figsize=(12, 12))
        fig.suptitle('Loss vs Epoch')
        for axis, (panel_title, loss_names) in zip(axes, panel_specs):
            axis.set_title(panel_title)
            panel_lines = []
            for loss_name in loss_names:
                loss_epoch_values = epoch_level_train_history[loss_name]
                line, = axis.plot(loss_epoch_values.keys(), loss_epoch_values.values(), label=loss_name)
                panel_lines.append(line)
            axis.legend(handles=panel_lines)
        plt.tight_layout()
        plt.show()
    finally:
        plt.close()

    final_losses = {
        loss_name: str(loss_epoch_values[max(loss_epoch_values.keys())])
        for loss_name, loss_epoch_values in epoch_level_train_history.items()
    }
    print(f"*** trained cycle gan final losses ***\n{json.dumps(final_losses, indent=4)}")


def plot_predictions_sample(monet_generator, photo_dataset):
    """Show a 5x2 grid: five input photos (left) and their Monet-styled outputs (right)."""
    print('*** Show trained model predictions sample ***')
    _, axes = plt.subplots(5, 2, figsize=(12, 12))
    for row, photo_batch in enumerate(photo_dataset.take(5)):
        # Generate a styled image and map both images from [-1, 1] back to uint8 [0, 255].
        styled = monet_generator(photo_batch, training=False)[0].numpy()
        styled = (styled * 127.5 + 127.5).astype(np.uint8)
        source = (photo_batch[0] * 127.5 + 127.5).numpy().astype(np.uint8)

        for col, (image, title) in enumerate(((source, "Input Photo"), (styled, "Monet-esque"))):
            axes[row, col].imshow(image)
            axes[row, col].set_title(title)
            axes[row, col].axis("off")
    plt.show()


"""# Define loss functions

The discriminator loss function below compares real images to a matrix of 1s and fake images to a matrix of 0s. The perfect discriminator will output all 1s for real images and all 0s for fake images. The discriminator loss outputs the average of the real and generated loss.
"""

"""# Create submission file"""
def create_predictions_for_kaggle_submission(monet_generator: tf.keras.Model, photo_dataset: Dataset):
    """Generate a Monet-styled 256x256 JPEG per photo and pack them into /kaggle/working/images.zip.

    Fix vs. previous version: images are JPEG-encoded in memory and written
    straight into the zip with ``ZipFile.writestr``, instead of being saved to
    a ``/tmp`` working folder that was never removed afterwards.
    """
    from io import BytesIO  # local import: only needed by this function

    output_zip_path = Path('/kaggle/working/images.zip')
    if output_zip_path.exists():
        output_zip_path.unlink()

    # 7_038 is the expected photo count; the tqdm total is cosmetic only — TODO confirm
    with ZipFile(output_zip_path, 'w') as output_images_zip, \
            tqdm(total=7_038, desc='generating prediction images for kaggle submission') as pbar:
        for i, img in enumerate(photo_dataset):
            prediction = monet_generator(img, training=False)[0]
            # The competition expects 256x256 images regardless of the model's working resolution.
            prediction = tf.image.resize(prediction, (256, 256), method='bilinear')
            prediction = prediction.numpy()
            # Map generator output (assumed in [-1, 1]) back to uint8 [0, 255].
            prediction = (prediction * 127.5 + 127.5).astype(np.uint8)
            # Encode the JPEG in memory and add it to the archive as '<index>.jpg'.
            jpeg_buffer = BytesIO()
            Image.fromarray(prediction).save(jpeg_buffer, format='JPEG')
            output_images_zip.writestr(f'{i + 1}.jpg', jpeg_buffer.getvalue())
            pbar.update()


def experiment_flow(
        choose_30_images_method: TrainImagesSelectionMethod,
        train_settings: dict,
        experiment_random_seed: int,
        create_kaggle_predictions_for_submission: bool = False,
):
    """Run one full experiment: seed, load data, pick 30 Monet images, train, and plot.

    Optionally also writes the Kaggle submission zip when
    ``create_kaggle_predictions_for_submission`` is True.
    """
    set_training_random_seed(experiment_random_seed)

    # Load both competition datasets as batch-of-1 pipelines.
    monet_files, photo_files = find_competition_dataset_files(LOCAL_DATASET_FOLDER_PATH)
    ordered_monet_dataset = load_tf_records_dataset(monet_files).batch(1)
    photo_dataset = load_tf_records_dataset(photo_files).batch(1)

    # Select the 30 Monet training images according to the requested strategy.
    selected_monet_dataset = _choose_30_monet_train_images(
        ordered_monet_dataset, choose_30_images_method,
        experiment_random_seed, use_preprocessed_cache=True
    )
    paired_train_dataset = tf.data.Dataset.zip((selected_monet_dataset, photo_dataset))

    # Build the model under the selected device strategy (TPU/GPU/CPU).
    with DEVICE_STRATEGY.scope():
        cycle_gan_model, monet_generator = _build_cycle_gan_model(train_settings)

    fit_history = cycle_gan_model.fit(
        paired_train_dataset,
        epochs=train_settings['train_epochs'],
        verbose=2
    )
    plot_cycle_gan_train_losses(fit_history)
    plot_predictions_sample(monet_generator, photo_dataset)

    if create_kaggle_predictions_for_submission:
        create_predictions_for_kaggle_submission(monet_generator, photo_dataset)


#Experiments
##Experiment functions
class ExperimentsToRunConfig:
    """Feature flags selecting which experiments run when this script executes."""
    # Compare the 30-image selection strategies using the baseline generator.
    CHOOSE_30_TRAIN_IMAGES_EXPERIMENT = False
    # Cross every generator network structure with two image-selection methods.
    GENERATOR_NETWORK_STRUCTURE_EXPERIMENT = True
    # Ad-hoc scratch run; name suggests it is slated for removal.
    ITAY_TO_DELETE_EXPERIMENT = False


def run_choose_30_train_images_experiment():
    """Train a baseline-generator CycleGAN once per image-selection method (40 epochs each)."""
    base_desc = 'choose_30_images_methods loop'
    selection_methods = list(TrainImagesSelectionMethod)
    with tqdm(total=len(selection_methods), desc=base_desc) as progress:
        for selection_method in selection_methods:
            progress.set_description(f"{base_desc} (curr_method = '{selection_method}')")
            baseline_settings = dict(
                train_epochs=40,
                optimizer_builder=lambda: tf.keras.optimizers.Adam(learning_rate=0.001, decay=0.001),
                generator_network_structure=GeneratorNetworkStructure.Baseline
            )
            experiment_flow(
                choose_30_images_method=selection_method,
                train_settings=baseline_settings,
                experiment_random_seed=1,
                create_kaggle_predictions_for_submission=False
            )
            progress.update()


def run_generator_network_structure_experiment():
    """Run one experiment per (generator structure, selection method) pair, 40 epochs each."""
    base_desc = 'generator_network_structure_experiment loop'
    selection_methods = (
        TrainImagesSelectionMethod.RandomSelection,
        TrainImagesSelectionMethod.FarthestImagesByPixelDistance
    )
    # Build the full cross-product of structures and selection methods.
    run_combinations = []
    for structure in GeneratorNetworkStructure:
        for selection_method in selection_methods:
            run_combinations.append(dict(
                generator_network_structure=structure,
                train_images_selection_method=selection_method
            ))

    with tqdm(total=len(run_combinations), desc=base_desc) as progress:
        for combination in run_combinations:
            progress.set_description(f"{base_desc} (run_combination={combination})")
            experiment_flow(
                choose_30_images_method=combination['train_images_selection_method'],
                train_settings=dict(
                    train_epochs=40,
                    optimizer_builder=lambda: tf.keras.optimizers.Adam(learning_rate=0.001, decay=0.001),
                    generator_network_structure=combination['generator_network_structure']
                ),
                experiment_random_seed=1,
                create_kaggle_predictions_for_submission=False
            )
            progress.update()


def run_itay_to_delete_experiment():
    """One-off scratch run: Deep generator with farthest-by-pixel-distance image selection."""
    scratch_settings = dict(
        train_epochs=40,
        optimizer_builder=lambda: tf.keras.optimizers.Adam(learning_rate=0.001, decay=0.001),
        generator_network_structure=GeneratorNetworkStructure.Deep
    )
    experiment_flow(
        choose_30_images_method=TrainImagesSelectionMethod.FarthestImagesByPixelDistance,
        train_settings=scratch_settings,
        experiment_random_seed=1,
        create_kaggle_predictions_for_submission=False
    )


##Experiment execution
# Each experiment runs only when its corresponding ExperimentsToRunConfig flag is True.
if ExperimentsToRunConfig.CHOOSE_30_TRAIN_IMAGES_EXPERIMENT:
    run_choose_30_train_images_experiment()

if ExperimentsToRunConfig.GENERATOR_NETWORK_STRUCTURE_EXPERIMENT:
    run_generator_network_structure_experiment()

if ExperimentsToRunConfig.ITAY_TO_DELETE_EXPERIMENT:
    run_itay_to_delete_experiment()
dependencies_already_installed: True
choose_strongest_available_device_strategy() - selected strategy type: _DefaultDistributionStrategy
Wed Feb 15 16:45:32 2023       
+-----------------------------------------------------------------------------+
| NVIDIA-SMI 470.82.01    Driver Version: 470.82.01    CUDA Version: 11.4     |
|-------------------------------+----------------------+----------------------+
| GPU  Name        Persistence-M| Bus-Id        Disp.A | Volatile Uncorr. ECC |
| Fan  Temp  Perf  Pwr:Usage/Cap|         Memory-Usage | GPU-Util  Compute M. |
|                               |                      |               MIG M. |
|===============================+======================+======================|
|   0  Tesla P100-PCIE...  Off  | 00000000:00:04.0 Off |                    0 |
| N/A   38C    P0    37W / 250W |  15977MiB / 16280MiB |      0%      Default |
|                               |                      |                  N/A |
+-------------------------------+----------------------+----------------------+
                                                                               
+-----------------------------------------------------------------------------+
| Processes:                                                                  |
|  GPU   GI   CI        PID   Type   Process name                  GPU Memory |
|        ID   ID                                                   Usage      |
|=============================================================================|
+-----------------------------------------------------------------------------+


*** running_on_tpu - False ***


dataset_already_downloaded: True
generator_network_structure_experiment loop (run_combination={'generator_network_structure': <GeneratorNetworkStructure.Baseline: 'baseline'>, 'train_images_selection_method': <TrainImagesSelectionMethod.RandomSelection: 'random_selection'>}):   0%|          | 0/8 [00:00<?, ?it/s]
set_training_random_seed() - seed value: 1
found 5 monet and 20 photo tfrec files.
set_training_random_seed() - seed value: 42
2023-02-15 16:45:33.055715: W tensorflow/core/data/root_dataset.cc:167] Optimization loop failed: Cancelled: Operation was cancelled
*** Selected 30 train monet photos (shape: (1, 320, 320, 3)) ***
set_training_random_seed() - seed value: 1
Epoch 1/40
30/30 - 19s - monet_gen_loss: 6.9626 - photo_gen_loss: 5.9112 - monet_disc_loss: 0.2345 - photo_disc_loss: 0.6435
Epoch 2/40
30/30 - 12s - monet_gen_loss: 6.7633 - photo_gen_loss: 4.6662 - monet_disc_loss: 0.0989 - photo_disc_loss: 0.6746
Epoch 3/40
30/30 - 12s - monet_gen_loss: 4.6350 - photo_gen_loss: 4.8057 - monet_disc_loss: 0.7258 - photo_disc_loss: 0.6197
Epoch 4/40
30/30 - 12s - monet_gen_loss: 3.5104 - photo_gen_loss: 3.1220 - monet_disc_loss: 0.7640 - photo_disc_loss: 0.7535
Epoch 5/40
30/30 - 12s - monet_gen_loss: 3.4218 - photo_gen_loss: 2.8866 - monet_disc_loss: 0.8662 - photo_disc_loss: 0.7112
Epoch 6/40
30/30 - 12s - monet_gen_loss: 3.2593 - photo_gen_loss: 2.6979 - monet_disc_loss: 0.7104 - photo_disc_loss: 0.7792
Epoch 7/40
30/30 - 12s - monet_gen_loss: 3.2395 - photo_gen_loss: 2.6063 - monet_disc_loss: 0.7459 - photo_disc_loss: 0.7625
Epoch 8/40
30/30 - 12s - monet_gen_loss: 3.1849 - photo_gen_loss: 2.5222 - monet_disc_loss: 0.8755 - photo_disc_loss: 0.8341
Epoch 9/40
30/30 - 12s - monet_gen_loss: 3.3647 - photo_gen_loss: 2.4910 - monet_disc_loss: 0.8527 - photo_disc_loss: 0.8187
Epoch 10/40
30/30 - 12s - monet_gen_loss: 3.3144 - photo_gen_loss: 2.7887 - monet_disc_loss: 0.8126 - photo_disc_loss: 0.6342
Epoch 11/40
30/30 - 12s - monet_gen_loss: 3.0709 - photo_gen_loss: 2.5117 - monet_disc_loss: 0.8032 - photo_disc_loss: 0.7544
Epoch 12/40
30/30 - 12s - monet_gen_loss: 3.3065 - photo_gen_loss: 2.7014 - monet_disc_loss: 0.9114 - photo_disc_loss: 0.7117
Epoch 13/40
30/30 - 12s - monet_gen_loss: 3.2505 - photo_gen_loss: 2.4335 - monet_disc_loss: 0.8669 - photo_disc_loss: 0.8110
Epoch 14/40
30/30 - 12s - monet_gen_loss: 3.0201 - photo_gen_loss: 2.3520 - monet_disc_loss: 0.8344 - photo_disc_loss: 0.8064
Epoch 15/40
30/30 - 12s - monet_gen_loss: 3.1055 - photo_gen_loss: 2.3546 - monet_disc_loss: 0.9768 - photo_disc_loss: 0.8247
Epoch 16/40
30/30 - 12s - monet_gen_loss: 2.8504 - photo_gen_loss: 2.3155 - monet_disc_loss: 0.8964 - photo_disc_loss: 0.7060
Epoch 17/40
30/30 - 12s - monet_gen_loss: 3.0314 - photo_gen_loss: 2.3105 - monet_disc_loss: 0.9045 - photo_disc_loss: 0.8191
Epoch 18/40
30/30 - 12s - monet_gen_loss: 3.1208 - photo_gen_loss: 2.4715 - monet_disc_loss: 0.9147 - photo_disc_loss: 0.7057
Epoch 19/40
30/30 - 12s - monet_gen_loss: 2.8780 - photo_gen_loss: 2.1129 - monet_disc_loss: 0.9312 - photo_disc_loss: 0.9064
Epoch 20/40
30/30 - 12s - monet_gen_loss: 2.9279 - photo_gen_loss: 2.4213 - monet_disc_loss: 0.9045 - photo_disc_loss: 0.6794
Epoch 21/40
30/30 - 12s - monet_gen_loss: 2.9812 - photo_gen_loss: 2.2528 - monet_disc_loss: 0.9079 - photo_disc_loss: 0.8212
Epoch 22/40
30/30 - 12s - monet_gen_loss: 2.8172 - photo_gen_loss: 2.1002 - monet_disc_loss: 0.8932 - photo_disc_loss: 0.8550
Epoch 23/40
30/30 - 12s - monet_gen_loss: 2.7932 - photo_gen_loss: 2.2246 - monet_disc_loss: 0.9597 - photo_disc_loss: 0.7914
Epoch 24/40
30/30 - 12s - monet_gen_loss: 2.8371 - photo_gen_loss: 2.2055 - monet_disc_loss: 0.7947 - photo_disc_loss: 0.8277
Epoch 25/40
30/30 - 12s - monet_gen_loss: 3.0438 - photo_gen_loss: 2.4541 - monet_disc_loss: 0.9631 - photo_disc_loss: 0.7323
Epoch 26/40
30/30 - 12s - monet_gen_loss: 2.8756 - photo_gen_loss: 2.1502 - monet_disc_loss: 0.8776 - photo_disc_loss: 0.8439
Epoch 27/40
30/30 - 12s - monet_gen_loss: 2.9542 - photo_gen_loss: 2.4061 - monet_disc_loss: 0.9210 - photo_disc_loss: 0.6947
Epoch 28/40
30/30 - 12s - monet_gen_loss: 2.8047 - photo_gen_loss: 2.0609 - monet_disc_loss: 0.8612 - photo_disc_loss: 0.8932
Epoch 29/40
30/30 - 12s - monet_gen_loss: 2.8905 - photo_gen_loss: 2.3097 - monet_disc_loss: 0.9428 - photo_disc_loss: 0.7569
Epoch 30/40
30/30 - 12s - monet_gen_loss: 2.7459 - photo_gen_loss: 2.1800 - monet_disc_loss: 0.8869 - photo_disc_loss: 0.8299
Epoch 31/40
30/30 - 12s - monet_gen_loss: 2.8500 - photo_gen_loss: 2.3103 - monet_disc_loss: 0.8642 - photo_disc_loss: 0.7043
Epoch 32/40
30/30 - 12s - monet_gen_loss: 2.8857 - photo_gen_loss: 2.1102 - monet_disc_loss: 0.9202 - photo_disc_loss: 0.8771
Epoch 33/40
30/30 - 12s - monet_gen_loss: 2.7054 - photo_gen_loss: 2.2203 - monet_disc_loss: 0.8380 - photo_disc_loss: 0.7105
Epoch 34/40
30/30 - 12s - monet_gen_loss: 2.7378 - photo_gen_loss: 2.1267 - monet_disc_loss: 0.8733 - photo_disc_loss: 0.8300
Epoch 35/40
30/30 - 12s - monet_gen_loss: 2.6889 - photo_gen_loss: 2.1963 - monet_disc_loss: 0.8667 - photo_disc_loss: 0.7326
Epoch 36/40
30/30 - 12s - monet_gen_loss: 2.7621 - photo_gen_loss: 2.1806 - monet_disc_loss: 0.8704 - photo_disc_loss: 0.7980
Epoch 37/40
30/30 - 12s - monet_gen_loss: 2.7949 - photo_gen_loss: 2.1478 - monet_disc_loss: 0.9164 - photo_disc_loss: 0.7865
Epoch 38/40
30/30 - 12s - monet_gen_loss: 2.6348 - photo_gen_loss: 2.1189 - monet_disc_loss: 0.8554 - photo_disc_loss: 0.7779
Epoch 39/40
30/30 - 12s - monet_gen_loss: 2.9706 - photo_gen_loss: 2.2352 - monet_disc_loss: 1.0323 - photo_disc_loss: 0.7970
Epoch 40/40
30/30 - 12s - monet_gen_loss: 2.5987 - photo_gen_loss: 2.1137 - monet_disc_loss: 0.9298 - photo_disc_loss: 0.8303
*** trained cycle gan final losses ***
{
    "monet_gen_loss": "2.598734",
    "photo_gen_loss": "2.1136844",
    "monet_disc_loss": "0.929799",
    "photo_disc_loss": "0.83030796"
}
*** Show trained model predictions sample ***
generator_network_structure_experiment loop (run_combination={'generator_network_structure': <GeneratorNetworkStructure.Baseline: 'baseline'>, 'train_images_selection_method': <TrainImagesSelectionMethod.FarthestImagesByPixelDistance: 'farthest_images_by_pixel_distance'>}):  12%|█▎        | 1/8 [12:53<1:30:16, 773.79s/it]
set_training_random_seed() - seed value: 1
found 5 monet and 20 photo tfrec files.
set_training_random_seed() - seed value: 42
*** Selected 30 train monet photos (shape: (1, 320, 320, 3)) ***
2023-02-15 16:58:26.734708: W tensorflow/core/data/root_dataset.cc:167] Optimization loop failed: Cancelled: Operation was cancelled
set_training_random_seed() - seed value: 1
Epoch 1/40
30/30 - 19s - monet_gen_loss: 4.4588 - photo_gen_loss: 4.3893 - monet_disc_loss: 0.9679 - photo_disc_loss: 0.6927
Epoch 2/40
30/30 - 12s - monet_gen_loss: 4.7791 - photo_gen_loss: 4.1088 - monet_disc_loss: 0.6077 - photo_disc_loss: 0.7131
Epoch 3/40
30/30 - 12s - monet_gen_loss: 3.7342 - photo_gen_loss: 3.2213 - monet_disc_loss: 0.6502 - photo_disc_loss: 0.7103
Epoch 4/40
30/30 - 12s - monet_gen_loss: 3.3687 - photo_gen_loss: 3.0517 - monet_disc_loss: 0.6132 - photo_disc_loss: 0.6684
Epoch 5/40
30/30 - 12s - monet_gen_loss: 3.5114 - photo_gen_loss: 2.9045 - monet_disc_loss: 0.6288 - photo_disc_loss: 0.6473
Epoch 6/40
30/30 - 12s - monet_gen_loss: 3.0068 - photo_gen_loss: 2.7803 - monet_disc_loss: 0.6712 - photo_disc_loss: 0.6469
Epoch 7/40
30/30 - 12s - monet_gen_loss: 3.5435 - photo_gen_loss: 3.0683 - monet_disc_loss: 0.5127 - photo_disc_loss: 0.6262
Epoch 8/40
30/30 - 12s - monet_gen_loss: 2.7614 - photo_gen_loss: 2.7735 - monet_disc_loss: 0.6545 - photo_disc_loss: 0.6372
Epoch 9/40
30/30 - 12s - monet_gen_loss: 3.0355 - photo_gen_loss: 2.6660 - monet_disc_loss: 0.4845 - photo_disc_loss: 0.6585
Epoch 10/40
30/30 - 12s - monet_gen_loss: 3.6220 - photo_gen_loss: 3.1978 - monet_disc_loss: 0.4996 - photo_disc_loss: 0.6169
Epoch 11/40
30/30 - 12s - monet_gen_loss: 2.8367 - photo_gen_loss: 2.6351 - monet_disc_loss: 0.7118 - photo_disc_loss: 0.6369
Epoch 12/40
30/30 - 12s - monet_gen_loss: 2.9212 - photo_gen_loss: 2.4975 - monet_disc_loss: 0.6706 - photo_disc_loss: 0.6133
Epoch 13/40
30/30 - 12s - monet_gen_loss: 2.7333 - photo_gen_loss: 2.4768 - monet_disc_loss: 0.6357 - photo_disc_loss: 0.5805
Epoch 14/40
30/30 - 12s - monet_gen_loss: 2.6923 - photo_gen_loss: 2.2797 - monet_disc_loss: 0.5942 - photo_disc_loss: 0.6433
Epoch 15/40
30/30 - 12s - monet_gen_loss: 2.6800 - photo_gen_loss: 2.3027 - monet_disc_loss: 0.5680 - photo_disc_loss: 0.5492
Epoch 16/40
30/30 - 12s - monet_gen_loss: 2.4687 - photo_gen_loss: 2.2622 - monet_disc_loss: 0.4972 - photo_disc_loss: 0.5643
Epoch 17/40
30/30 - 12s - monet_gen_loss: 2.6511 - photo_gen_loss: 2.4242 - monet_disc_loss: 0.6424 - photo_disc_loss: 0.5433
Epoch 18/40
30/30 - 12s - monet_gen_loss: 2.6250 - photo_gen_loss: 2.4111 - monet_disc_loss: 0.5430 - photo_disc_loss: 0.5057
Epoch 19/40
30/30 - 12s - monet_gen_loss: 2.6670 - photo_gen_loss: 2.3912 - monet_disc_loss: 0.5175 - photo_disc_loss: 0.5799
Epoch 20/40
30/30 - 12s - monet_gen_loss: 2.4601 - photo_gen_loss: 2.2331 - monet_disc_loss: 0.6611 - photo_disc_loss: 0.5898
Epoch 21/40
30/30 - 12s - monet_gen_loss: 2.6985 - photo_gen_loss: 2.2634 - monet_disc_loss: 0.4954 - photo_disc_loss: 0.5056
Epoch 22/40
30/30 - 12s - monet_gen_loss: 2.6048 - photo_gen_loss: 2.2794 - monet_disc_loss: 0.5960 - photo_disc_loss: 0.4929
Epoch 23/40
30/30 - 12s - monet_gen_loss: 2.5756 - photo_gen_loss: 2.2293 - monet_disc_loss: 0.4458 - photo_disc_loss: 0.5316
Epoch 24/40
30/30 - 12s - monet_gen_loss: 2.4966 - photo_gen_loss: 2.1695 - monet_disc_loss: 0.7215 - photo_disc_loss: 0.6127
Epoch 25/40
30/30 - 12s - monet_gen_loss: 2.4521 - photo_gen_loss: 2.2148 - monet_disc_loss: 0.6475 - photo_disc_loss: 0.5186
Epoch 26/40
30/30 - 12s - monet_gen_loss: 2.5898 - photo_gen_loss: 2.5094 - monet_disc_loss: 0.6118 - photo_disc_loss: 0.4357
Epoch 27/40
30/30 - 12s - monet_gen_loss: 2.4629 - photo_gen_loss: 2.3138 - monet_disc_loss: 0.5766 - photo_disc_loss: 0.4973
Epoch 28/40
30/30 - 12s - monet_gen_loss: 2.5601 - photo_gen_loss: 2.3552 - monet_disc_loss: 0.5605 - photo_disc_loss: 0.4953
Epoch 29/40
30/30 - 12s - monet_gen_loss: 2.5462 - photo_gen_loss: 2.3141 - monet_disc_loss: 0.5594 - photo_disc_loss: 0.5284
Epoch 30/40
30/30 - 12s - monet_gen_loss: 2.6758 - photo_gen_loss: 2.3794 - monet_disc_loss: 0.5579 - photo_disc_loss: 0.5441
Epoch 31/40
30/30 - 12s - monet_gen_loss: 2.5304 - photo_gen_loss: 2.3340 - monet_disc_loss: 0.5466 - photo_disc_loss: 0.5333
Epoch 32/40
30/30 - 12s - monet_gen_loss: 2.6963 - photo_gen_loss: 2.3732 - monet_disc_loss: 0.5290 - photo_disc_loss: 0.5095
Epoch 33/40
30/30 - 12s - monet_gen_loss: 2.4645 - photo_gen_loss: 2.5061 - monet_disc_loss: 0.5262 - photo_disc_loss: 0.3853
Epoch 34/40
30/30 - 12s - monet_gen_loss: 2.7450 - photo_gen_loss: 2.2151 - monet_disc_loss: 0.5070 - photo_disc_loss: 0.5724
Epoch 35/40
30/30 - 12s - monet_gen_loss: 2.4855 - photo_gen_loss: 2.3618 - monet_disc_loss: 0.5901 - photo_disc_loss: 0.5203
Epoch 36/40
30/30 - 12s - monet_gen_loss: 2.6043 - photo_gen_loss: 2.3857 - monet_disc_loss: 0.5189 - photo_disc_loss: 0.5244
Epoch 37/40
30/30 - 12s - monet_gen_loss: 2.7309 - photo_gen_loss: 2.1973 - monet_disc_loss: 0.5159 - photo_disc_loss: 0.6037
Epoch 38/40
30/30 - 12s - monet_gen_loss: 2.4817 - photo_gen_loss: 2.3855 - monet_disc_loss: 0.6943 - photo_disc_loss: 0.5185
Epoch 39/40
30/30 - 12s - monet_gen_loss: 2.8610 - photo_gen_loss: 2.4673 - monet_disc_loss: 0.5180 - photo_disc_loss: 0.4894
Epoch 40/40
30/30 - 12s - monet_gen_loss: 2.5357 - photo_gen_loss: 2.4546 - monet_disc_loss: 0.5345 - photo_disc_loss: 0.4957
*** trained cycle gan final losses ***
{
    "monet_gen_loss": "2.5356746",
    "photo_gen_loss": "2.4545534",
    "monet_disc_loss": "0.53454167",
    "photo_disc_loss": "0.49571583"
}
*** Show trained model predictions sample ***
generator_network_structure_experiment loop (run_combination={'generator_network_structure': <GeneratorNetworkStructure.Thin: 'thin'>, 'train_images_selection_method': <TrainImagesSelectionMethod.RandomSelection: 'random_selection'>}):  25%|██▌       | 2/8 [26:21<1:19:20, 793.49s/it]                                       
set_training_random_seed() - seed value: 1
found 5 monet and 20 photo tfrec files.
set_training_random_seed() - seed value: 42
2023-02-15 17:11:54.019710: W tensorflow/core/data/root_dataset.cc:167] Optimization loop failed: Cancelled: Operation was cancelled
*** Selected 30 train monet photos (shape: (1, 320, 320, 3)) ***
set_training_random_seed() - seed value: 1
Epoch 1/40
30/30 - 17s - monet_gen_loss: 5.6887 - photo_gen_loss: 4.5225 - monet_disc_loss: 0.6457 - photo_disc_loss: 4.6325
Epoch 2/40
30/30 - 10s - monet_gen_loss: 8.8450 - photo_gen_loss: 6.3208 - monet_disc_loss: 3.5009 - photo_disc_loss: 0.6056
Epoch 3/40
30/30 - 10s - monet_gen_loss: 3.5875 - photo_gen_loss: 3.5142 - monet_disc_loss: 0.6159 - photo_disc_loss: 0.7318
Epoch 4/40
30/30 - 10s - monet_gen_loss: 3.3904 - photo_gen_loss: 2.8538 - monet_disc_loss: 0.8142 - photo_disc_loss: 0.7955
Epoch 5/40
30/30 - 10s - monet_gen_loss: 3.4582 - photo_gen_loss: 2.8220 - monet_disc_loss: 0.8134 - photo_disc_loss: 0.7996
Epoch 6/40
30/30 - 10s - monet_gen_loss: 3.2979 - photo_gen_loss: 2.8541 - monet_disc_loss: 0.6098 - photo_disc_loss: 0.7487
Epoch 7/40
30/30 - 10s - monet_gen_loss: 3.2801 - photo_gen_loss: 2.7333 - monet_disc_loss: 0.7488 - photo_disc_loss: 0.7248
Epoch 8/40
30/30 - 10s - monet_gen_loss: 3.2332 - photo_gen_loss: 2.6827 - monet_disc_loss: 0.8233 - photo_disc_loss: 0.7525
Epoch 9/40
30/30 - 10s - monet_gen_loss: 3.3071 - photo_gen_loss: 2.4642 - monet_disc_loss: 0.8394 - photo_disc_loss: 0.7767
Epoch 10/40
30/30 - 10s - monet_gen_loss: 3.3175 - photo_gen_loss: 2.9458 - monet_disc_loss: 0.7954 - photo_disc_loss: 0.6872
Epoch 11/40
30/30 - 10s - monet_gen_loss: 3.2110 - photo_gen_loss: 2.8143 - monet_disc_loss: 0.6822 - photo_disc_loss: 0.6687
Epoch 12/40
30/30 - 10s - monet_gen_loss: 2.9325 - photo_gen_loss: 2.5309 - monet_disc_loss: 0.7266 - photo_disc_loss: 0.7837
Epoch 13/40
30/30 - 10s - monet_gen_loss: 3.3525 - photo_gen_loss: 2.7884 - monet_disc_loss: 0.7315 - photo_disc_loss: 0.7169
Epoch 14/40
30/30 - 10s - monet_gen_loss: 3.1869 - photo_gen_loss: 2.3284 - monet_disc_loss: 0.7643 - photo_disc_loss: 0.8141
Epoch 15/40
30/30 - 10s - monet_gen_loss: 3.0523 - photo_gen_loss: 2.7376 - monet_disc_loss: 0.7984 - photo_disc_loss: 0.6940
Epoch 16/40
30/30 - 10s - monet_gen_loss: 2.9444 - photo_gen_loss: 2.5673 - monet_disc_loss: 0.7854 - photo_disc_loss: 0.6887
Epoch 17/40
30/30 - 10s - monet_gen_loss: 2.9407 - photo_gen_loss: 2.3384 - monet_disc_loss: 0.8242 - photo_disc_loss: 0.7409
Epoch 18/40
30/30 - 10s - monet_gen_loss: 2.7032 - photo_gen_loss: 2.4568 - monet_disc_loss: 0.7218 - photo_disc_loss: 0.6596
Epoch 19/40
30/30 - 10s - monet_gen_loss: 2.7342 - photo_gen_loss: 2.3509 - monet_disc_loss: 0.7861 - photo_disc_loss: 0.7346
Epoch 20/40
30/30 - 10s - monet_gen_loss: 2.5795 - photo_gen_loss: 2.4134 - monet_disc_loss: 0.6480 - photo_disc_loss: 0.6973
Epoch 21/40
30/30 - 10s - monet_gen_loss: 2.8483 - photo_gen_loss: 2.3886 - monet_disc_loss: 0.8464 - photo_disc_loss: 0.7312
Epoch 22/40
30/30 - 10s - monet_gen_loss: 2.7350 - photo_gen_loss: 2.0461 - monet_disc_loss: 0.8393 - photo_disc_loss: 0.8267
Epoch 23/40
30/30 - 10s - monet_gen_loss: 2.5194 - photo_gen_loss: 2.4688 - monet_disc_loss: 0.6893 - photo_disc_loss: 0.6688
Epoch 24/40
30/30 - 10s - monet_gen_loss: 2.8430 - photo_gen_loss: 2.4306 - monet_disc_loss: 0.8942 - photo_disc_loss: 0.6512
Epoch 25/40
30/30 - 10s - monet_gen_loss: 2.4703 - photo_gen_loss: 2.0319 - monet_disc_loss: 0.7158 - photo_disc_loss: 0.8537
Epoch 26/40
30/30 - 10s - monet_gen_loss: 2.7387 - photo_gen_loss: 2.1494 - monet_disc_loss: 1.0012 - photo_disc_loss: 0.7731
Epoch 27/40
30/30 - 10s - monet_gen_loss: 2.3039 - photo_gen_loss: 1.9342 - monet_disc_loss: 0.7775 - photo_disc_loss: 0.8267
Epoch 28/40
30/30 - 10s - monet_gen_loss: 2.7830 - photo_gen_loss: 2.3508 - monet_disc_loss: 0.8930 - photo_disc_loss: 0.7282
Epoch 29/40
30/30 - 10s - monet_gen_loss: 2.5841 - photo_gen_loss: 1.8360 - monet_disc_loss: 0.7216 - photo_disc_loss: 0.8731
Epoch 30/40
30/30 - 10s - monet_gen_loss: 2.3825 - photo_gen_loss: 2.1823 - monet_disc_loss: 0.6694 - photo_disc_loss: 0.7059
Epoch 31/40
30/30 - 10s - monet_gen_loss: 2.3108 - photo_gen_loss: 1.9957 - monet_disc_loss: 0.8305 - photo_disc_loss: 0.7256
Epoch 32/40
30/30 - 10s - monet_gen_loss: 2.6505 - photo_gen_loss: 2.1717 - monet_disc_loss: 0.8632 - photo_disc_loss: 0.7722
Epoch 33/40
30/30 - 10s - monet_gen_loss: 2.0498 - photo_gen_loss: 1.8155 - monet_disc_loss: 0.6562 - photo_disc_loss: 0.7773
Epoch 34/40
30/30 - 10s - monet_gen_loss: 2.5580 - photo_gen_loss: 2.1199 - monet_disc_loss: 0.8822 - photo_disc_loss: 0.7200
Epoch 35/40
30/30 - 10s - monet_gen_loss: 2.2640 - photo_gen_loss: 1.9059 - monet_disc_loss: 0.6495 - photo_disc_loss: 0.8484
Epoch 36/40
30/30 - 10s - monet_gen_loss: 2.5622 - photo_gen_loss: 2.0150 - monet_disc_loss: 0.9318 - photo_disc_loss: 0.8205
Epoch 37/40
30/30 - 10s - monet_gen_loss: 2.2067 - photo_gen_loss: 1.8009 - monet_disc_loss: 0.7906 - photo_disc_loss: 0.8315
Epoch 38/40
30/30 - 10s - monet_gen_loss: 2.7214 - photo_gen_loss: 2.1207 - monet_disc_loss: 1.0073 - photo_disc_loss: 0.7187
Epoch 39/40
30/30 - 10s - monet_gen_loss: 2.2249 - photo_gen_loss: 1.7805 - monet_disc_loss: 0.6442 - photo_disc_loss: 0.8368
Epoch 40/40
30/30 - 10s - monet_gen_loss: 2.6062 - photo_gen_loss: 1.8520 - monet_disc_loss: 0.8660 - photo_disc_loss: 0.7922
*** trained cycle gan final losses ***
{
    "monet_gen_loss": "2.6061606",
    "photo_gen_loss": "1.8519706",
    "monet_disc_loss": "0.86604017",
    "photo_disc_loss": "0.79220164"
}
*** Show trained model predictions sample ***
generator_network_structure_experiment loop (run_combination={'generator_network_structure': <GeneratorNetworkStructure.Thin: 'thin'>, 'train_images_selection_method': <TrainImagesSelectionMethod.FarthestImagesByPixelDistance: 'farthest_images_by_pixel_distance'>}):  38%|███▊      | 3/8 [33:21<51:55, 623.10s/it]
set_training_random_seed() - seed value: 1
found 5 monet and 20 photo tfrec files.
set_training_random_seed() - seed value: 42
2023-02-15 17:18:54.762104: W tensorflow/core/data/root_dataset.cc:167] Optimization loop failed: Cancelled: Operation was cancelled
*** Selected 30 train monet photos (shape: (1, 320, 320, 3)) ***
set_training_random_seed() - seed value: 1
Epoch 1/40
30/30 - 17s - monet_gen_loss: 4.5515 - photo_gen_loss: 5.3784 - monet_disc_loss: 0.7871 - photo_disc_loss: 0.1718
Epoch 2/40
30/30 - 10s - monet_gen_loss: 4.7783 - photo_gen_loss: 4.0517 - monet_disc_loss: 1.1679 - photo_disc_loss: 0.5767
Epoch 3/40
30/30 - 10s - monet_gen_loss: 3.3578 - photo_gen_loss: 2.9687 - monet_disc_loss: 0.7131 - photo_disc_loss: 0.8926
Epoch 4/40
30/30 - 10s - monet_gen_loss: 3.1395 - photo_gen_loss: 3.0934 - monet_disc_loss: 0.7048 - photo_disc_loss: 0.6724
Epoch 5/40
30/30 - 10s - monet_gen_loss: 3.4156 - photo_gen_loss: 3.0878 - monet_disc_loss: 0.6338 - photo_disc_loss: 0.6295
Epoch 6/40
30/30 - 10s - monet_gen_loss: 3.3740 - photo_gen_loss: 2.8740 - monet_disc_loss: 0.6750 - photo_disc_loss: 0.6515
Epoch 7/40
30/30 - 10s - monet_gen_loss: 3.1292 - photo_gen_loss: 2.8032 - monet_disc_loss: 0.6039 - photo_disc_loss: 0.6087
Epoch 8/40
30/30 - 10s - monet_gen_loss: 3.1973 - photo_gen_loss: 2.8812 - monet_disc_loss: 0.6456 - photo_disc_loss: 0.5925
Epoch 9/40
30/30 - 10s - monet_gen_loss: 2.8777 - photo_gen_loss: 2.6888 - monet_disc_loss: 0.5430 - photo_disc_loss: 0.6153
Epoch 10/40
30/30 - 10s - monet_gen_loss: 3.3044 - photo_gen_loss: 3.0282 - monet_disc_loss: 0.6291 - photo_disc_loss: 0.5965
Epoch 11/40
30/30 - 10s - monet_gen_loss: 2.6566 - photo_gen_loss: 2.7466 - monet_disc_loss: 0.6319 - photo_disc_loss: 0.5946
Epoch 12/40
30/30 - 10s - monet_gen_loss: 2.8179 - photo_gen_loss: 2.4145 - monet_disc_loss: 0.6574 - photo_disc_loss: 0.6091
Epoch 13/40
30/30 - 10s - monet_gen_loss: 2.6596 - photo_gen_loss: 2.4417 - monet_disc_loss: 0.6232 - photo_disc_loss: 0.5937
Epoch 14/40
30/30 - 10s - monet_gen_loss: 2.6890 - photo_gen_loss: 2.3865 - monet_disc_loss: 0.5618 - photo_disc_loss: 0.5986
Epoch 15/40
30/30 - 10s - monet_gen_loss: 2.6008 - photo_gen_loss: 2.3229 - monet_disc_loss: 0.5725 - photo_disc_loss: 0.5709
Epoch 16/40
30/30 - 10s - monet_gen_loss: 2.6411 - photo_gen_loss: 2.3981 - monet_disc_loss: 0.5063 - photo_disc_loss: 0.5680
Epoch 17/40
30/30 - 10s - monet_gen_loss: 2.6120 - photo_gen_loss: 2.3514 - monet_disc_loss: 0.6754 - photo_disc_loss: 0.5670
Epoch 18/40
30/30 - 10s - monet_gen_loss: 2.6002 - photo_gen_loss: 2.2765 - monet_disc_loss: 0.5597 - photo_disc_loss: 0.5698
Epoch 19/40
30/30 - 10s - monet_gen_loss: 2.4950 - photo_gen_loss: 2.2282 - monet_disc_loss: 0.6058 - photo_disc_loss: 0.5687
Epoch 20/40
30/30 - 10s - monet_gen_loss: 2.6166 - photo_gen_loss: 2.3836 - monet_disc_loss: 0.5385 - photo_disc_loss: 0.5362
Epoch 21/40
30/30 - 10s - monet_gen_loss: 2.5005 - photo_gen_loss: 2.2818 - monet_disc_loss: 0.6748 - photo_disc_loss: 0.6187
Epoch 22/40
30/30 - 10s - monet_gen_loss: 2.5997 - photo_gen_loss: 2.3079 - monet_disc_loss: 0.6065 - photo_disc_loss: 0.5482
Epoch 23/40
30/30 - 10s - monet_gen_loss: 2.5768 - photo_gen_loss: 2.2814 - monet_disc_loss: 0.5004 - photo_disc_loss: 0.5282
Epoch 24/40
30/30 - 10s - monet_gen_loss: 2.4851 - photo_gen_loss: 2.3423 - monet_disc_loss: 0.6291 - photo_disc_loss: 0.5489
Epoch 25/40
30/30 - 10s - monet_gen_loss: 2.6196 - photo_gen_loss: 2.4544 - monet_disc_loss: 0.5203 - photo_disc_loss: 0.4700
Epoch 26/40
30/30 - 10s - monet_gen_loss: 2.3692 - photo_gen_loss: 2.2247 - monet_disc_loss: 0.5510 - photo_disc_loss: 0.5575
Epoch 27/40
30/30 - 10s - monet_gen_loss: 2.5881 - photo_gen_loss: 2.2794 - monet_disc_loss: 0.4942 - photo_disc_loss: 0.5613
Epoch 28/40
30/30 - 10s - monet_gen_loss: 2.3980 - photo_gen_loss: 2.3513 - monet_disc_loss: 0.5642 - photo_disc_loss: 0.5312
Epoch 29/40
30/30 - 10s - monet_gen_loss: 2.4808 - photo_gen_loss: 2.4214 - monet_disc_loss: 0.5270 - photo_disc_loss: 0.5209
Epoch 30/40
30/30 - 10s - monet_gen_loss: 2.5388 - photo_gen_loss: 2.4068 - monet_disc_loss: 0.5167 - photo_disc_loss: 0.5223
Epoch 31/40
30/30 - 10s - monet_gen_loss: 2.4946 - photo_gen_loss: 2.3037 - monet_disc_loss: 0.5440 - photo_disc_loss: 0.5296
Epoch 32/40
30/30 - 10s - monet_gen_loss: 2.4492 - photo_gen_loss: 2.2225 - monet_disc_loss: 0.5141 - photo_disc_loss: 0.4997
Epoch 33/40
30/30 - 10s - monet_gen_loss: 2.3362 - photo_gen_loss: 2.3925 - monet_disc_loss: 0.5836 - photo_disc_loss: 0.4951
Epoch 34/40
30/30 - 10s - monet_gen_loss: 2.6071 - photo_gen_loss: 2.3150 - monet_disc_loss: 0.4565 - photo_disc_loss: 0.5152
Epoch 35/40
30/30 - 10s - monet_gen_loss: 2.3406 - photo_gen_loss: 2.4969 - monet_disc_loss: 0.8486 - photo_disc_loss: 0.5108
Epoch 36/40
30/30 - 10s - monet_gen_loss: 2.4405 - photo_gen_loss: 2.5083 - monet_disc_loss: 0.5069 - photo_disc_loss: 0.4522
Epoch 37/40
30/30 - 10s - monet_gen_loss: 2.4042 - photo_gen_loss: 2.0422 - monet_disc_loss: 0.6734 - photo_disc_loss: 0.5637
Epoch 38/40
30/30 - 10s - monet_gen_loss: 2.6631 - photo_gen_loss: 2.2905 - monet_disc_loss: 0.5073 - photo_disc_loss: 0.4890
Epoch 39/40
30/30 - 10s - monet_gen_loss: 2.4480 - photo_gen_loss: 2.1542 - monet_disc_loss: 0.5222 - photo_disc_loss: 0.5081
Epoch 40/40
30/30 - 10s - monet_gen_loss: 2.5223 - photo_gen_loss: 2.4473 - monet_disc_loss: 0.4823 - photo_disc_loss: 0.5082
*** trained cycle gan final losses ***
{
    "monet_gen_loss": "2.522315",
    "photo_gen_loss": "2.447285",
    "monet_disc_loss": "0.48227173",
    "photo_disc_loss": "0.50821066"
}
*** Show trained model predictions sample ***
generator_network_structure_experiment loop (run_combination={'generator_network_structure': <GeneratorNetworkStructure.Wide: 'wide'>, 'train_images_selection_method': <TrainImagesSelectionMethod.RandomSelection: 'random_selection'>}):  50%|█████     | 4/8 [40:18<36:07, 541.85s/it]                               
set_training_random_seed() - seed value: 1
found 5 monet and 20 photo tfrec files.
set_training_random_seed() - seed value: 42
2023-02-15 17:25:51.647706: W tensorflow/core/data/root_dataset.cc:167] Optimization loop failed: Cancelled: Operation was cancelled
*** Selected 30 train monet photos (shape: (1, 320, 320, 3)) ***
set_training_random_seed() - seed value: 1
Epoch 1/40
30/30 - 40s - monet_gen_loss: 42.9992 - photo_gen_loss: 26.6610 - monet_disc_loss: 3.1331e-05 - photo_disc_loss: 0.2013
Epoch 2/40
30/30 - 33s - monet_gen_loss: 34.5780 - photo_gen_loss: 28.7995 - monet_disc_loss: 0.0013 - photo_disc_loss: 0.1888
Epoch 3/40
30/30 - 33s - monet_gen_loss: 58.0853 - photo_gen_loss: 25.9595 - monet_disc_loss: 3.6230e-08 - photo_disc_loss: 0.3670
Epoch 4/40
30/30 - 33s - monet_gen_loss: 59.4278 - photo_gen_loss: 27.8396 - monet_disc_loss: 2.1647e-08 - photo_disc_loss: 0.1397
Epoch 5/40
30/30 - 33s - monet_gen_loss: 59.4952 - photo_gen_loss: 28.2371 - monet_disc_loss: 2.1091e-08 - photo_disc_loss: 0.0454
Epoch 6/40
30/30 - 33s - monet_gen_loss: 59.5020 - photo_gen_loss: 26.6663 - monet_disc_loss: 2.1043e-08 - photo_disc_loss: 0.2230
Epoch 7/40
30/30 - 33s - monet_gen_loss: 59.5058 - photo_gen_loss: 27.0101 - monet_disc_loss: 2.1010e-08 - photo_disc_loss: 0.1423
Epoch 8/40
30/30 - 33s - monet_gen_loss: 59.5098 - photo_gen_loss: 29.4588 - monet_disc_loss: 2.0955e-08 - photo_disc_loss: 0.1005
Epoch 9/40
30/30 - 33s - monet_gen_loss: 59.5136 - photo_gen_loss: 29.8980 - monet_disc_loss: 2.0871e-08 - photo_disc_loss: 0.0196
Epoch 10/40
30/30 - 33s - monet_gen_loss: 59.5177 - photo_gen_loss: 35.2548 - monet_disc_loss: 2.0862e-08 - photo_disc_loss: 1.1614e-04
Epoch 11/40
30/30 - 33s - monet_gen_loss: 59.5223 - photo_gen_loss: 38.3401 - monet_disc_loss: 2.0799e-08 - photo_disc_loss: 1.5961e-05
Epoch 12/40
30/30 - 33s - monet_gen_loss: 59.5269 - photo_gen_loss: 38.8312 - monet_disc_loss: 2.0777e-08 - photo_disc_loss: 1.1406e-05
Epoch 13/40
30/30 - 33s - monet_gen_loss: 59.5312 - photo_gen_loss: 39.0687 - monet_disc_loss: 2.0652e-08 - photo_disc_loss: 9.6660e-06
Epoch 14/40
30/30 - 33s - monet_gen_loss: 52.7688 - photo_gen_loss: 39.4644 - monet_disc_loss: 2.4858e-07 - photo_disc_loss: 8.4929e-06
Epoch 15/40
30/30 - 33s - monet_gen_loss: 52.5773 - photo_gen_loss: 39.6344 - monet_disc_loss: 2.5282e-07 - photo_disc_loss: 7.6072e-06
Epoch 16/40
30/30 - 33s - monet_gen_loss: 52.6402 - photo_gen_loss: 39.7803 - monet_disc_loss: 2.4549e-07 - photo_disc_loss: 6.8861e-06
Epoch 17/40
30/30 - 33s - monet_gen_loss: 52.7017 - photo_gen_loss: 39.9132 - monet_disc_loss: 2.3786e-07 - photo_disc_loss: 6.2998e-06
Epoch 18/40
30/30 - 33s - monet_gen_loss: 52.7618 - photo_gen_loss: 40.0354 - monet_disc_loss: 2.3056e-07 - photo_disc_loss: 5.8118e-06
Epoch 19/40
30/30 - 33s - monet_gen_loss: 52.8234 - photo_gen_loss: 40.1479 - monet_disc_loss: 2.2356e-07 - photo_disc_loss: 5.4015e-06
Epoch 20/40
30/30 - 33s - monet_gen_loss: 52.8834 - photo_gen_loss: 40.2511 - monet_disc_loss: 2.1688e-07 - photo_disc_loss: 5.0539e-06
Epoch 21/40
30/30 - 33s - monet_gen_loss: 52.9438 - photo_gen_loss: 40.3470 - monet_disc_loss: 2.1001e-07 - photo_disc_loss: 4.7547e-06
Epoch 22/40
30/30 - 33s - monet_gen_loss: 53.0012 - photo_gen_loss: 40.4355 - monet_disc_loss: 2.0437e-07 - photo_disc_loss: 4.4967e-06
Epoch 23/40
30/30 - 33s - monet_gen_loss: 53.0576 - photo_gen_loss: 40.5171 - monet_disc_loss: 1.9853e-07 - photo_disc_loss: 4.2725e-06
Epoch 24/40
30/30 - 33s - monet_gen_loss: 53.1160 - photo_gen_loss: 40.5940 - monet_disc_loss: 1.9247e-07 - photo_disc_loss: 4.0736e-06
Epoch 25/40
30/30 - 33s - monet_gen_loss: 53.1697 - photo_gen_loss: 40.6643 - monet_disc_loss: 1.8755e-07 - photo_disc_loss: 3.8999e-06
Epoch 26/40
30/30 - 33s - monet_gen_loss: 53.2250 - photo_gen_loss: 40.7298 - monet_disc_loss: 1.8200e-07 - photo_disc_loss: 3.7471e-06
Epoch 27/40
30/30 - 33s - monet_gen_loss: 53.2780 - photo_gen_loss: 40.7926 - monet_disc_loss: 1.7753e-07 - photo_disc_loss: 3.6022e-06
Epoch 28/40
30/30 - 33s - monet_gen_loss: 53.3308 - photo_gen_loss: 40.8521 - monet_disc_loss: 1.7283e-07 - photo_disc_loss: 3.4724e-06
Epoch 29/40
30/30 - 33s - monet_gen_loss: 53.3827 - photo_gen_loss: 40.9090 - monet_disc_loss: 1.6833e-07 - photo_disc_loss: 3.3531e-06
Epoch 30/40
30/30 - 33s - monet_gen_loss: 53.4338 - photo_gen_loss: 40.9636 - monet_disc_loss: 1.6400e-07 - photo_disc_loss: 3.2430e-06
Epoch 31/40
30/30 - 33s - monet_gen_loss: 53.4842 - photo_gen_loss: 41.0156 - monet_disc_loss: 1.5984e-07 - photo_disc_loss: 3.1407e-06
Epoch 32/40
30/30 - 33s - monet_gen_loss: 53.5329 - photo_gen_loss: 41.0656 - monet_disc_loss: 1.5586e-07 - photo_disc_loss: 3.0454e-06
Epoch 33/40
30/30 - 33s - monet_gen_loss: 53.5820 - photo_gen_loss: 41.1136 - monet_disc_loss: 1.5167e-07 - photo_disc_loss: 2.9563e-06
Epoch 34/40
30/30 - 33s - monet_gen_loss: 53.6300 - photo_gen_loss: 41.1599 - monet_disc_loss: 1.4833e-07 - photo_disc_loss: 2.8726e-06
Epoch 35/40
30/30 - 33s - monet_gen_loss: 53.6773 - photo_gen_loss: 41.2043 - monet_disc_loss: 1.4477e-07 - photo_disc_loss: 2.7936e-06
Epoch 36/40
30/30 - 33s - monet_gen_loss: 53.7235 - photo_gen_loss: 41.2476 - monet_disc_loss: 1.4102e-07 - photo_disc_loss: 2.7190e-06
Epoch 37/40
30/30 - 33s - monet_gen_loss: 53.7699 - photo_gen_loss: 41.2892 - monet_disc_loss: 1.3806e-07 - photo_disc_loss: 2.6482e-06
Epoch 38/40
30/30 - 33s - monet_gen_loss: 53.8146 - photo_gen_loss: 41.3293 - monet_disc_loss: 1.3488e-07 - photo_disc_loss: 2.5809e-06
Epoch 39/40
30/30 - 33s - monet_gen_loss: 53.8594 - photo_gen_loss: 41.3683 - monet_disc_loss: 1.3182e-07 - photo_disc_loss: 2.5185e-06
Epoch 40/40
30/30 - 33s - monet_gen_loss: 53.9050 - photo_gen_loss: 41.4065 - monet_disc_loss: 1.2855e-07 - photo_disc_loss: 2.4572e-06
*** trained cycle gan final losses ***
{
    "monet_gen_loss": "53.904995",
    "photo_gen_loss": "41.406487",
    "monet_disc_loss": "1.2855122e-07",
    "photo_disc_loss": "2.4571907e-06"
}
*** Show trained model predictions sample ***
generator_network_structure_experiment loop (run_combination={'generator_network_structure': <GeneratorNetworkStructure.Wide: 'wide'>, 'train_images_selection_method': <TrainImagesSelectionMethod.FarthestImagesByPixelDistance: 'farthest_images_by_pixel_distance'>}):  62%|██████▎   | 5/8 [1:07:33<46:48, 936.11s/it]
set_training_random_seed() - seed value: 1
found 5 monet and 20 photo tfrec files.
set_training_random_seed() - seed value: 42
2023-02-15 17:53:06.816698: W tensorflow/core/data/root_dataset.cc:167] Optimization loop failed: Cancelled: Operation was cancelled
*** Selected 30 train monet photos (shape: (1, 320, 320, 3)) ***
set_training_random_seed() - seed value: 1
Epoch 1/40
30/30 - 41s - monet_gen_loss: 11.0363 - photo_gen_loss: 10.5668 - monet_disc_loss: 0.6488 - photo_disc_loss: 2.7502
Epoch 2/40
30/30 - 33s - monet_gen_loss: 6.5437 - photo_gen_loss: 7.4271 - monet_disc_loss: 1.1823 - photo_disc_loss: 0.2241
Epoch 3/40
30/30 - 33s - monet_gen_loss: 5.9551 - photo_gen_loss: 5.7164 - monet_disc_loss: 1.3582 - photo_disc_loss: 0.5212
Epoch 4/40
30/30 - 33s - monet_gen_loss: 5.1562 - photo_gen_loss: 5.4527 - monet_disc_loss: 0.6164 - photo_disc_loss: 0.3070
Epoch 5/40
30/30 - 33s - monet_gen_loss: 4.5845 - photo_gen_loss: 4.9217 - monet_disc_loss: 0.8270 - photo_disc_loss: 0.3918
Epoch 6/40
30/30 - 33s - monet_gen_loss: 4.8575 - photo_gen_loss: 4.5374 - monet_disc_loss: 0.9554 - photo_disc_loss: 0.5124
Epoch 7/40
30/30 - 33s - monet_gen_loss: 3.6382 - photo_gen_loss: 3.4186 - monet_disc_loss: 0.6710 - photo_disc_loss: 0.6629
Epoch 8/40
30/30 - 33s - monet_gen_loss: 3.7242 - photo_gen_loss: 3.3219 - monet_disc_loss: 0.7430 - photo_disc_loss: 0.6268
Epoch 9/40
30/30 - 33s - monet_gen_loss: 3.4427 - photo_gen_loss: 3.1999 - monet_disc_loss: 0.6735 - photo_disc_loss: 0.6525
Epoch 10/40
30/30 - 33s - monet_gen_loss: 3.5342 - photo_gen_loss: 3.1035 - monet_disc_loss: 0.6530 - photo_disc_loss: 0.6505
Epoch 11/40
30/30 - 33s - monet_gen_loss: 3.4917 - photo_gen_loss: 3.1826 - monet_disc_loss: 0.6635 - photo_disc_loss: 0.5986
Epoch 12/40
30/30 - 33s - monet_gen_loss: 3.3751 - photo_gen_loss: 3.1341 - monet_disc_loss: 0.6983 - photo_disc_loss: 0.6131
Epoch 13/40
30/30 - 33s - monet_gen_loss: 3.5018 - photo_gen_loss: 2.8954 - monet_disc_loss: 0.7003 - photo_disc_loss: 0.7371
Epoch 14/40
30/30 - 33s - monet_gen_loss: 3.3361 - photo_gen_loss: 2.9261 - monet_disc_loss: 0.6505 - photo_disc_loss: 0.6509
Epoch 15/40
30/30 - 33s - monet_gen_loss: 3.3610 - photo_gen_loss: 2.8778 - monet_disc_loss: 0.6557 - photo_disc_loss: 0.6998
Epoch 16/40
30/30 - 33s - monet_gen_loss: 3.1939 - photo_gen_loss: 3.0494 - monet_disc_loss: 0.6710 - photo_disc_loss: 0.5565
Epoch 17/40
30/30 - 33s - monet_gen_loss: 3.4816 - photo_gen_loss: 2.8994 - monet_disc_loss: 0.5453 - photo_disc_loss: 0.6709
Epoch 18/40
30/30 - 33s - monet_gen_loss: 3.2415 - photo_gen_loss: 3.0712 - monet_disc_loss: 0.6975 - photo_disc_loss: 0.6166
Epoch 19/40
30/30 - 33s - monet_gen_loss: 3.3020 - photo_gen_loss: 2.8512 - monet_disc_loss: 0.6736 - photo_disc_loss: 0.6789
Epoch 20/40
30/30 - 33s - monet_gen_loss: 3.1882 - photo_gen_loss: 2.9821 - monet_disc_loss: 0.6449 - photo_disc_loss: 0.5995
Epoch 21/40
30/30 - 33s - monet_gen_loss: 3.2669 - photo_gen_loss: 2.9124 - monet_disc_loss: 0.6678 - photo_disc_loss: 0.6126
Epoch 22/40
30/30 - 33s - monet_gen_loss: 2.8301 - photo_gen_loss: 2.8170 - monet_disc_loss: 0.5804 - photo_disc_loss: 0.5582
Epoch 23/40
30/30 - 33s - monet_gen_loss: 2.9250 - photo_gen_loss: 2.4742 - monet_disc_loss: 0.6319 - photo_disc_loss: 0.6870
Epoch 24/40
30/30 - 33s - monet_gen_loss: 2.8156 - photo_gen_loss: 2.6202 - monet_disc_loss: 0.5829 - photo_disc_loss: 0.5772
Epoch 25/40
30/30 - 33s - monet_gen_loss: 3.0207 - photo_gen_loss: 2.6136 - monet_disc_loss: 0.5579 - photo_disc_loss: 0.5838
Epoch 26/40
30/30 - 33s - monet_gen_loss: 2.6972 - photo_gen_loss: 2.4799 - monet_disc_loss: 0.6280 - photo_disc_loss: 0.5866
Epoch 27/40
30/30 - 33s - monet_gen_loss: 2.7948 - photo_gen_loss: 2.5693 - monet_disc_loss: 0.5535 - photo_disc_loss: 0.5285
Epoch 28/40
30/30 - 33s - monet_gen_loss: 2.7291 - photo_gen_loss: 2.4871 - monet_disc_loss: 0.6022 - photo_disc_loss: 0.6215
Epoch 29/40
30/30 - 33s - monet_gen_loss: 2.7592 - photo_gen_loss: 2.4860 - monet_disc_loss: 0.5790 - photo_disc_loss: 0.5666
Epoch 30/40
30/30 - 33s - monet_gen_loss: 2.6600 - photo_gen_loss: 2.4934 - monet_disc_loss: 0.5708 - photo_disc_loss: 0.5548
Epoch 31/40
30/30 - 33s - monet_gen_loss: 2.7373 - photo_gen_loss: 2.5347 - monet_disc_loss: 0.6397 - photo_disc_loss: 0.5691
Epoch 32/40
30/30 - 33s - monet_gen_loss: 2.6323 - photo_gen_loss: 2.5440 - monet_disc_loss: 0.5605 - photo_disc_loss: 0.5072
Epoch 33/40
30/30 - 33s - monet_gen_loss: 2.7482 - photo_gen_loss: 2.4052 - monet_disc_loss: 0.5034 - photo_disc_loss: 0.5684
Epoch 34/40
30/30 - 33s - monet_gen_loss: 2.7275 - photo_gen_loss: 2.3708 - monet_disc_loss: 0.5761 - photo_disc_loss: 0.5790
Epoch 35/40
30/30 - 33s - monet_gen_loss: 2.5928 - photo_gen_loss: 2.4641 - monet_disc_loss: 0.5299 - photo_disc_loss: 0.5298
Epoch 36/40
30/30 - 33s - monet_gen_loss: 2.6405 - photo_gen_loss: 2.4739 - monet_disc_loss: 0.6246 - photo_disc_loss: 0.5530
Epoch 37/40
30/30 - 33s - monet_gen_loss: 2.6621 - photo_gen_loss: 2.5289 - monet_disc_loss: 0.5593 - photo_disc_loss: 0.4866
Epoch 38/40
30/30 - 33s - monet_gen_loss: 2.7250 - photo_gen_loss: 2.4611 - monet_disc_loss: 0.5181 - photo_disc_loss: 0.5155
Epoch 39/40
30/30 - 33s - monet_gen_loss: 2.7030 - photo_gen_loss: 2.4487 - monet_disc_loss: 0.5799 - photo_disc_loss: 0.5315
Epoch 40/40
30/30 - 33s - monet_gen_loss: 2.5977 - photo_gen_loss: 2.4588 - monet_disc_loss: 0.5308 - photo_disc_loss: 0.4856
*** trained cycle gan final losses ***
{
    "monet_gen_loss": "2.5977178",
    "photo_gen_loss": "2.4588454",
    "monet_disc_loss": "0.53080577",
    "photo_disc_loss": "0.48556075"
}
*** Show trained model predictions sample ***
generator_network_structure_experiment loop (run_combination={'generator_network_structure': <GeneratorNetworkStructure.Deep: 'deep'>, 'train_images_selection_method': <TrainImagesSelectionMethod.RandomSelection: 'random_selection'>}):  75%|███████▌  | 6/8 [1:34:34<38:57, 1168.76s/it]                               
set_training_random_seed() - seed value: 1
found 5 monet and 20 photo tfrec files.
set_training_random_seed() - seed value: 42
2023-02-15 18:20:07.170798: W tensorflow/core/data/root_dataset.cc:167] Optimization loop failed: Cancelled: Operation was cancelled
*** Selected 30 train monet photos (shape: (1, 320, 320, 3)) ***
set_training_random_seed() - seed value: 1
Epoch 1/40
30/30 - 27s - monet_gen_loss: 20.7014 - photo_gen_loss: 33.8561 - monet_disc_loss: 0.3954 - photo_disc_loss: 2.5011e-05
Epoch 2/40
30/30 - 18s - monet_gen_loss: 14.9586 - photo_gen_loss: 59.3551 - monet_disc_loss: 0.5503 - photo_disc_loss: 3.5026e-11
Epoch 3/40
30/30 - 18s - monet_gen_loss: 15.3484 - photo_gen_loss: 64.0509 - monet_disc_loss: 0.4089 - photo_disc_loss: 5.2862e-12
Epoch 4/40
30/30 - 18s - monet_gen_loss: 15.5235 - photo_gen_loss: 64.4749 - monet_disc_loss: 0.4455 - photo_disc_loss: 4.7953e-12
Epoch 5/40
30/30 - 18s - monet_gen_loss: 15.3892 - photo_gen_loss: 64.4937 - monet_disc_loss: 0.6359 - photo_disc_loss: 4.7732e-12
Epoch 6/40
30/30 - 18s - monet_gen_loss: 15.3954 - photo_gen_loss: 64.6191 - monet_disc_loss: 0.5390 - photo_disc_loss: 4.7726e-12
Epoch 7/40
30/30 - 18s - monet_gen_loss: 15.2785 - photo_gen_loss: 64.7998 - monet_disc_loss: 0.4396 - photo_disc_loss: 4.7726e-12
Epoch 8/40
30/30 - 18s - monet_gen_loss: 15.4768 - photo_gen_loss: 65.0121 - monet_disc_loss: 0.6424 - photo_disc_loss: 4.7726e-12
Epoch 9/40
30/30 - 18s - monet_gen_loss: 16.6443 - photo_gen_loss: 65.4653 - monet_disc_loss: 0.3046 - photo_disc_loss: 4.7726e-12
Epoch 10/40
30/30 - 18s - monet_gen_loss: 15.1218 - photo_gen_loss: 64.5970 - monet_disc_loss: 0.5377 - photo_disc_loss: 4.7726e-12
Epoch 11/40
30/30 - 18s - monet_gen_loss: 15.9524 - photo_gen_loss: 65.0678 - monet_disc_loss: 0.5831 - photo_disc_loss: 4.7726e-12
Epoch 12/40
30/30 - 18s - monet_gen_loss: 15.3651 - photo_gen_loss: 64.9423 - monet_disc_loss: 0.5980 - photo_disc_loss: 4.7726e-12
Epoch 13/40
30/30 - 18s - monet_gen_loss: 15.6099 - photo_gen_loss: 65.1085 - monet_disc_loss: 0.4440 - photo_disc_loss: 4.7726e-12
Epoch 14/40
30/30 - 18s - monet_gen_loss: 15.5684 - photo_gen_loss: 65.1585 - monet_disc_loss: 0.5166 - photo_disc_loss: 4.7726e-12
Epoch 15/40
30/30 - 18s - monet_gen_loss: 15.9508 - photo_gen_loss: 65.3537 - monet_disc_loss: 0.9184 - photo_disc_loss: 4.7726e-12
Epoch 16/40
30/30 - 18s - monet_gen_loss: 15.8842 - photo_gen_loss: 65.1550 - monet_disc_loss: 0.6687 - photo_disc_loss: 4.7726e-12
Epoch 17/40
30/30 - 18s - monet_gen_loss: 15.9591 - photo_gen_loss: 65.5729 - monet_disc_loss: 0.6592 - photo_disc_loss: 4.7726e-12
Epoch 18/40
30/30 - 18s - monet_gen_loss: 15.5259 - photo_gen_loss: 65.0155 - monet_disc_loss: 0.4790 - photo_disc_loss: 4.7726e-12
Epoch 19/40
30/30 - 18s - monet_gen_loss: 15.2374 - photo_gen_loss: 65.0062 - monet_disc_loss: 0.8154 - photo_disc_loss: 4.7726e-12
Epoch 20/40
30/30 - 18s - monet_gen_loss: 15.4206 - photo_gen_loss: 65.2546 - monet_disc_loss: 0.7128 - photo_disc_loss: 4.7726e-12
Epoch 21/40
30/30 - 18s - monet_gen_loss: 15.7066 - photo_gen_loss: 64.7802 - monet_disc_loss: 0.3634 - photo_disc_loss: 4.7726e-12
Epoch 22/40
30/30 - 18s - monet_gen_loss: 13.9132 - photo_gen_loss: 63.7596 - monet_disc_loss: 0.9825 - photo_disc_loss: 4.7726e-12
Epoch 23/40
30/30 - 18s - monet_gen_loss: 14.6612 - photo_gen_loss: 65.1839 - monet_disc_loss: 0.6635 - photo_disc_loss: 4.7726e-12
Epoch 24/40
30/30 - 18s - monet_gen_loss: 14.9068 - photo_gen_loss: 65.2202 - monet_disc_loss: 0.7471 - photo_disc_loss: 4.7726e-12
Epoch 25/40
30/30 - 18s - monet_gen_loss: 15.3967 - photo_gen_loss: 65.5466 - monet_disc_loss: 0.4442 - photo_disc_loss: 4.7726e-12
Epoch 26/40
30/30 - 18s - monet_gen_loss: 14.3847 - photo_gen_loss: 65.0817 - monet_disc_loss: 0.6429 - photo_disc_loss: 4.7726e-12
Epoch 27/40
30/30 - 18s - monet_gen_loss: 16.2750 - photo_gen_loss: 66.0482 - monet_disc_loss: 0.7653 - photo_disc_loss: 4.7726e-12
Epoch 28/40
30/30 - 18s - monet_gen_loss: 15.5572 - photo_gen_loss: 65.5381 - monet_disc_loss: 0.4277 - photo_disc_loss: 4.7726e-12
Epoch 29/40
30/30 - 18s - monet_gen_loss: 15.3360 - photo_gen_loss: 65.5861 - monet_disc_loss: 0.6794 - photo_disc_loss: 4.7726e-12
Epoch 30/40
30/30 - 18s - monet_gen_loss: 15.3781 - photo_gen_loss: 65.7736 - monet_disc_loss: 0.8841 - photo_disc_loss: 4.7726e-12
Epoch 31/40
30/30 - 18s - monet_gen_loss: 15.2665 - photo_gen_loss: 65.4774 - monet_disc_loss: 0.7094 - photo_disc_loss: 4.7726e-12
Epoch 32/40
30/30 - 18s - monet_gen_loss: 15.3361 - photo_gen_loss: 65.5812 - monet_disc_loss: 0.6377 - photo_disc_loss: 4.7726e-12
Epoch 33/40
30/30 - 18s - monet_gen_loss: 14.7962 - photo_gen_loss: 65.1417 - monet_disc_loss: 0.7310 - photo_disc_loss: 4.7726e-12
Epoch 34/40
30/30 - 18s - monet_gen_loss: 15.0212 - photo_gen_loss: 65.3766 - monet_disc_loss: 0.7993 - photo_disc_loss: 4.7726e-12
Epoch 35/40
30/30 - 18s - monet_gen_loss: 15.2718 - photo_gen_loss: 65.3505 - monet_disc_loss: 0.9309 - photo_disc_loss: 4.7726e-12
Epoch 36/40
30/30 - 18s - monet_gen_loss: 14.8566 - photo_gen_loss: 65.5292 - monet_disc_loss: 0.6221 - photo_disc_loss: 4.7726e-12
Epoch 37/40
30/30 - 18s - monet_gen_loss: 14.6800 - photo_gen_loss: 65.1357 - monet_disc_loss: 0.6923 - photo_disc_loss: 4.7726e-12
Epoch 38/40
30/30 - 18s - monet_gen_loss: 15.3166 - photo_gen_loss: 65.5894 - monet_disc_loss: 0.9749 - photo_disc_loss: 4.7726e-12
Epoch 39/40
30/30 - 18s - monet_gen_loss: 14.7962 - photo_gen_loss: 65.3656 - monet_disc_loss: 0.7965 - photo_disc_loss: 4.7726e-12
Epoch 40/40
30/30 - 18s - monet_gen_loss: 15.0028 - photo_gen_loss: 65.4161 - monet_disc_loss: 0.8250 - photo_disc_loss: 4.7726e-12
*** trained cycle gan final losses ***
{
    "monet_gen_loss": "15.002794",
    "photo_gen_loss": "65.4161",
    "monet_disc_loss": "0.82495534",
    "photo_disc_loss": "4.772573e-12"
}
*** Show trained model predictions sample ***
generator_network_structure_experiment loop (run_combination={'generator_network_structure': <GeneratorNetworkStructure.Deep: 'deep'>, 'train_images_selection_method': <TrainImagesSelectionMethod.FarthestImagesByPixelDistance: 'farthest_images_by_pixel_distance'>}):  88%|████████▊ | 7/8 [1:48:04<17:31, 1051.61s/it]
set_training_random_seed() - seed value: 1
found 5 monet and 20 photo tfrec files.
set_training_random_seed() - seed value: 42
2023-02-15 18:33:38.065696: W tensorflow/core/data/root_dataset.cc:167] Optimization loop failed: Cancelled: Operation was cancelled
*** Selected 30 train monet photos (shape: (1, 320, 320, 3)) ***
set_training_random_seed() - seed value: 1
Epoch 1/40
30/30 - 27s - monet_gen_loss: 6.2116 - photo_gen_loss: 5.5460 - monet_disc_loss: 0.3674 - photo_disc_loss: 0.6230
Epoch 2/40
30/30 - 18s - monet_gen_loss: 4.0206 - photo_gen_loss: 4.3031 - monet_disc_loss: 0.6764 - photo_disc_loss: 0.8499
Epoch 3/40
30/30 - 18s - monet_gen_loss: 3.4013 - photo_gen_loss: 3.0315 - monet_disc_loss: 0.6665 - photo_disc_loss: 0.7512
Epoch 4/40
30/30 - 18s - monet_gen_loss: 3.4589 - photo_gen_loss: 2.9351 - monet_disc_loss: 0.6943 - photo_disc_loss: 0.7045
Epoch 5/40
30/30 - 18s - monet_gen_loss: 3.6223 - photo_gen_loss: 2.9620 - monet_disc_loss: 0.6924 - photo_disc_loss: 0.6944
Epoch 6/40
30/30 - 18s - monet_gen_loss: 3.1774 - photo_gen_loss: 2.8747 - monet_disc_loss: 0.6835 - photo_disc_loss: 0.6850
Epoch 7/40
30/30 - 18s - monet_gen_loss: 3.4273 - photo_gen_loss: 2.9083 - monet_disc_loss: 0.6336 - photo_disc_loss: 0.6780
Epoch 8/40
30/30 - 18s - monet_gen_loss: 2.8482 - photo_gen_loss: 2.6991 - monet_disc_loss: 0.6836 - photo_disc_loss: 0.6643
Epoch 9/40
30/30 - 18s - monet_gen_loss: 2.8993 - photo_gen_loss: 2.5531 - monet_disc_loss: 0.6115 - photo_disc_loss: 0.6657
Epoch 10/40
30/30 - 18s - monet_gen_loss: 2.7969 - photo_gen_loss: 2.5443 - monet_disc_loss: 0.6739 - photo_disc_loss: 0.6527
Epoch 11/40
30/30 - 18s - monet_gen_loss: 2.7495 - photo_gen_loss: 2.5028 - monet_disc_loss: 0.4985 - photo_disc_loss: 0.6311
Epoch 12/40
30/30 - 18s - monet_gen_loss: 2.6965 - photo_gen_loss: 2.4743 - monet_disc_loss: 0.6667 - photo_disc_loss: 0.5927
Epoch 13/40
30/30 - 18s - monet_gen_loss: 2.7229 - photo_gen_loss: 2.2700 - monet_disc_loss: 0.6453 - photo_disc_loss: 0.6614
Epoch 14/40
30/30 - 18s - monet_gen_loss: 2.7570 - photo_gen_loss: 2.2380 - monet_disc_loss: 0.5216 - photo_disc_loss: 0.6456
Epoch 15/40
30/30 - 18s - monet_gen_loss: 2.5712 - photo_gen_loss: 2.3531 - monet_disc_loss: 0.6868 - photo_disc_loss: 0.6031
Epoch 16/40
30/30 - 18s - monet_gen_loss: 2.5500 - photo_gen_loss: 2.4818 - monet_disc_loss: 0.5845 - photo_disc_loss: 0.4997
Epoch 17/40
30/30 - 18s - monet_gen_loss: 2.6251 - photo_gen_loss: 2.2437 - monet_disc_loss: 0.5734 - photo_disc_loss: 0.6405
Epoch 18/40
30/30 - 18s - monet_gen_loss: 2.5169 - photo_gen_loss: 2.3224 - monet_disc_loss: 0.5288 - photo_disc_loss: 0.6215
Epoch 19/40
30/30 - 18s - monet_gen_loss: 2.6729 - photo_gen_loss: 2.4584 - monet_disc_loss: 0.5467 - photo_disc_loss: 0.5734
Epoch 20/40
30/30 - 18s - monet_gen_loss: 2.2874 - photo_gen_loss: 2.4115 - monet_disc_loss: 0.6004 - photo_disc_loss: 0.5673
Epoch 21/40
30/30 - 18s - monet_gen_loss: 2.6430 - photo_gen_loss: 2.3529 - monet_disc_loss: 0.5505 - photo_disc_loss: 0.5563
Epoch 22/40
30/30 - 18s - monet_gen_loss: 2.3873 - photo_gen_loss: 2.4312 - monet_disc_loss: 0.5863 - photo_disc_loss: 0.5817
Epoch 23/40
30/30 - 18s - monet_gen_loss: 2.5668 - photo_gen_loss: 2.4743 - monet_disc_loss: 0.6187 - photo_disc_loss: 0.5195
Epoch 24/40
30/30 - 18s - monet_gen_loss: 2.6303 - photo_gen_loss: 2.6657 - monet_disc_loss: 0.6331 - photo_disc_loss: 0.5546
Epoch 25/40
30/30 - 18s - monet_gen_loss: 2.5632 - photo_gen_loss: 2.2954 - monet_disc_loss: 0.6267 - photo_disc_loss: 0.5574
Epoch 26/40
30/30 - 18s - monet_gen_loss: 2.6812 - photo_gen_loss: 2.4876 - monet_disc_loss: 0.5010 - photo_disc_loss: 0.5178
Epoch 27/40
30/30 - 18s - monet_gen_loss: 2.3959 - photo_gen_loss: 2.4808 - monet_disc_loss: 0.6253 - photo_disc_loss: 0.5464
Epoch 28/40
30/30 - 18s - monet_gen_loss: 2.5793 - photo_gen_loss: 2.4264 - monet_disc_loss: 0.5126 - photo_disc_loss: 0.4857
Epoch 29/40
30/30 - 18s - monet_gen_loss: 2.3396 - photo_gen_loss: 2.5018 - monet_disc_loss: 0.5650 - photo_disc_loss: 0.5602
Epoch 30/40
30/30 - 18s - monet_gen_loss: 2.6042 - photo_gen_loss: 2.4729 - monet_disc_loss: 0.4788 - photo_disc_loss: 0.4920
Epoch 31/40
30/30 - 18s - monet_gen_loss: 2.4982 - photo_gen_loss: 2.5419 - monet_disc_loss: 0.5838 - photo_disc_loss: 0.5656
Epoch 32/40
30/30 - 18s - monet_gen_loss: 2.4382 - photo_gen_loss: 2.6171 - monet_disc_loss: 0.5610 - photo_disc_loss: 0.5029
Epoch 33/40
30/30 - 18s - monet_gen_loss: 2.4889 - photo_gen_loss: 2.4432 - monet_disc_loss: 0.5155 - photo_disc_loss: 0.5091
Epoch 34/40
30/30 - 18s - monet_gen_loss: 2.4786 - photo_gen_loss: 2.4979 - monet_disc_loss: 0.6069 - photo_disc_loss: 0.5873
Epoch 35/40
30/30 - 18s - monet_gen_loss: 2.5389 - photo_gen_loss: 2.5599 - monet_disc_loss: 0.5752 - photo_disc_loss: 0.4884
Epoch 36/40
30/30 - 18s - monet_gen_loss: 2.7151 - photo_gen_loss: 2.7180 - monet_disc_loss: 0.5277 - photo_disc_loss: 0.4428
Epoch 37/40
30/30 - 18s - monet_gen_loss: 2.4422 - photo_gen_loss: 2.5225 - monet_disc_loss: 0.5285 - photo_disc_loss: 0.4875
Epoch 38/40
30/30 - 18s - monet_gen_loss: 2.7173 - photo_gen_loss: 2.6460 - monet_disc_loss: 0.6357 - photo_disc_loss: 0.5218
Epoch 39/40
30/30 - 18s - monet_gen_loss: 2.5986 - photo_gen_loss: 2.5894 - monet_disc_loss: 0.6222 - photo_disc_loss: 0.5361
Epoch 40/40
30/30 - 18s - monet_gen_loss: 2.6445 - photo_gen_loss: 2.4307 - monet_disc_loss: 0.5115 - photo_disc_loss: 0.4717
*** trained cycle gan final losses ***
{
    "monet_gen_loss": "2.644516",
    "photo_gen_loss": "2.430712",
    "monet_disc_loss": "0.5115418",
    "photo_disc_loss": "0.4717267"
}
*** Show trained model predictions sample ***
generator_network_structure_experiment loop (run_combination={'generator_network_structure': <GeneratorNetworkStructure.Deep: 'deep'>, 'train_images_selection_method': <TrainImagesSelectionMethod.FarthestImagesByPixelDistance: 'farthest_images_by_pixel_distance'>}): 100%|██████████| 8/8 [2:01:41<00:00, 912.65s/it]